Diffstat
-rw-r--r--  .ci/before_build_wheel.sh | 10
-rwxr-xr-x  .ci/scripts/calculate_jobs.py | 30
-rwxr-xr-x  .ci/scripts/check_lockfile.py | 6
-rwxr-xr-x  .ci/scripts/prepare_old_deps.sh | 2
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 3
-rw-r--r--  .github/workflows/docker.yml | 18
-rw-r--r--  .github/workflows/docs-pr-netlify.yaml | 4
-rw-r--r--  .github/workflows/docs-pr.yaml | 8
-rw-r--r--  .github/workflows/docs.yaml | 16
-rw-r--r--  .github/workflows/fix_lint.yaml | 27
-rw-r--r--  .github/workflows/latest_deps.yml | 38
-rw-r--r--  .github/workflows/poetry_lockfile.yaml | 4
-rw-r--r--  .github/workflows/push_complement_image.yml | 10
-rw-r--r--  .github/workflows/release-artifacts.yml | 65
-rw-r--r--  .github/workflows/schema.yaml | 54
-rw-r--r--  .github/workflows/tests.yml | 178
-rw-r--r--  .github/workflows/triage-incoming.yml | 2
-rw-r--r--  .github/workflows/triage_labelled.yml | 2
-rw-r--r--  .github/workflows/twisted_trunk.yml | 40
-rw-r--r--  .gitignore | 1
-rw-r--r--  CHANGES.md | 3207
-rw-r--r--  Cargo.lock | 247
-rw-r--r--  LICENSE-AGPL-3.0 (renamed from LICENSE) | 0
-rw-r--r--  LICENSE-COMMERCIAL | 6
-rw-r--r--  README.rst | 31
-rw-r--r--  build_rust.py | 19
-rw-r--r--  changelog.d/17483.bugfix | 1
-rw-r--r--  changelog.d/17510.bugfix | 1
-rw-r--r--  changelog.d/17514.misc | 1
-rw-r--r--  changelog.d/17515.doc | 3
-rw-r--r--  changelog.d/17531.misc | 1
-rw-r--r--  changelog.d/17535.bugfix | 1
-rw-r--r--  changelog.d/17536.misc | 1
-rwxr-xr-x  contrib/cmdclient/console.py | 9
-rw-r--r--  contrib/cmdclient/http.py | 8
-rw-r--r--  contrib/docker/README.md | 3
-rw-r--r--  contrib/docker/docker-compose.yml | 2
-rw-r--r--  contrib/docker_compose_workers/README.md | 3
-rw-r--r--  contrib/grafana/synapse.json | 96
-rw-r--r--  contrib/graph/graph.py | 4
-rw-r--r--  contrib/vertobot/.gitignore | 2
-rwxr-xr-x  contrib/vertobot/bot.pl | 309
-rwxr-xr-x  contrib/vertobot/bridge.pl | 493
-rw-r--r--  contrib/vertobot/config.yaml | 32
-rw-r--r--  contrib/vertobot/cpanfile | 14
-rw-r--r--  contrib/vertobot/verto-example.json | 207
-rwxr-xr-x  debian/build_virtualenv | 2
-rw-r--r--  debian/changelog | 319
-rw-r--r--  debian/hash_password.1 | 27
-rw-r--r--  debian/hash_password.1.html | 182
-rw-r--r--  debian/hash_password.ronn | 13
-rwxr-xr-x  demo/start.sh | 7
-rw-r--r--  docker/Dockerfile | 171
-rw-r--r--  docker/Dockerfile-workers | 50
-rw-r--r--  docker/README.md | 3
-rw-r--r--  docker/complement/Dockerfile | 11
-rwxr-xr-x  docker/complement/conf/start_for_complement.sh | 17
-rw-r--r--  docker/complement/conf/workers-shared-extra.yaml.j2 | 33
-rw-r--r--  docker/conf-workers/nginx.conf.j2 | 5
-rw-r--r--  docker/conf-workers/synapse.supervisord.conf.j2 | 3
-rwxr-xr-x  docker/configure_workers_and_start.py | 22
-rwxr-xr-x  docker/prefix-log | 7
-rwxr-xr-x  docker/start.py | 5
-rw-r--r--  docs/README.md | 12
-rw-r--r--  docs/SUMMARY.md | 9
-rw-r--r--  docs/admin_api/event_reports.md | 9
-rw-r--r--  docs/admin_api/experimental_features.md | 1
-rw-r--r--  docs/admin_api/media_admin_api.md | 8
-rw-r--r--  docs/admin_api/rooms.md | 14
-rw-r--r--  docs/admin_api/scheduled_tasks.md | 54
-rw-r--r--  docs/admin_api/user_admin_api.md | 220
-rw-r--r--  docs/changelogs/CHANGES-2023.md | 2202
-rw-r--r--  docs/changelogs/CHANGES-2024.md | 1586
-rw-r--r--  docs/code_style.md | 4
-rw-r--r--  docs/development/cas.md | 64
-rw-r--r--  docs/development/contributing_guide.md | 2
-rw-r--r--  docs/development/database_schema.md | 2
-rw-r--r--  docs/development/dependencies.md | 30
-rw-r--r--  docs/development/saml.md | 40
-rw-r--r--  docs/modules/media_repository_callbacks.md | 66
-rw-r--r--  docs/modules/password_auth_provider_callbacks.md | 30
-rw-r--r--  docs/modules/ratelimit_callbacks.md | 43
-rw-r--r--  docs/modules/spam_checker_callbacks.md | 59
-rw-r--r--  docs/modules/third_party_rules_callbacks.md | 81
-rw-r--r--  docs/openid.md | 57
-rw-r--r--  docs/postgres.md | 12
-rw-r--r--  docs/reverse_proxy.md | 10
-rw-r--r--  docs/setup/installation.md | 35
-rw-r--r--  docs/spam_checker.md | 6
-rw-r--r--  docs/sso_mapping_providers.md | 91
-rw-r--r--  docs/upgrade.md | 101
-rw-r--r--  docs/usage/administration/admin_faq.md | 4
-rw-r--r--  docs/usage/administration/backups.md | 125
-rw-r--r--  docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md | 6
-rw-r--r--  docs/usage/administration/monthly_active_users.md | 3
-rw-r--r--  docs/usage/configuration/config_documentation.md | 4356
-rw-r--r--  docs/usage/configuration/user_authentication/README.md | 2
-rw-r--r--  docs/usage/configuration/user_authentication/single_sign_on/README.md | 6
-rw-r--r--  docs/usage/configuration/user_authentication/single_sign_on/cas.md | 8
-rw-r--r--  docs/usage/configuration/user_authentication/single_sign_on/saml.md | 8
-rw-r--r--  docs/workers.md | 43
-rw-r--r--  flake.lock | 94
-rw-r--r--  flake.nix | 34
-rw-r--r--  mypy.ini | 5
-rw-r--r--  poetry.lock | 2457
-rw-r--r--  pylint.cfg | 280
-rw-r--r--  pyproject.toml | 68
-rw-r--r--  rust/Cargo.toml | 8
-rw-r--r--  rust/benches/evaluator.rs | 5
-rw-r--r--  rust/src/acl/mod.rs | 4
-rw-r--r--  rust/src/events/filter.rs | 107
-rw-r--r--  rust/src/events/internal_metadata.rs | 82
-rw-r--r--  rust/src/events/mod.rs | 8
-rw-r--r--  rust/src/http.rs | 2
-rw-r--r--  rust/src/identifier.rs | 252
-rw-r--r--  rust/src/lib.rs | 17
-rw-r--r--  rust/src/matrix_const.rs | 28
-rw-r--r--  rust/src/push/base_rules.rs | 2
-rw-r--r--  rust/src/push/evaluator.rs | 15
-rw-r--r--  rust/src/push/mod.rs | 55
-rw-r--r--  rust/src/push/utils.rs | 1
-rw-r--r--  rust/src/rendezvous/mod.rs | 26
-rw-r--r--  schema/synapse-config.schema.yaml | 5028
-rw-r--r--  schema/v1/Do not edit files in this folder | 2
-rw-r--r--  schema/v1/meta.schema.json | 29
-rw-r--r--  schema/v1/vocab/documentation.html | 11
-rwxr-xr-x  scripts-dev/build_debian_packages.py | 9
-rwxr-xr-x  scripts-dev/check_pydantic_models.py | 51
-rwxr-xr-x  scripts-dev/check_schema_delta.py | 127
-rwxr-xr-x  scripts-dev/complement.sh | 7
-rwxr-xr-x  scripts-dev/gen_config_documentation.py | 494
-rwxr-xr-x  scripts-dev/lint.sh | 15
-rw-r--r--  scripts-dev/mypy_synapse_plugin.py | 21
-rwxr-xr-x  scripts-dev/release.py | 48
-rw-r--r--  stubs/txredisapi.pyi | 4
-rw-r--r--  synapse/__init__.py | 7
-rw-r--r--  synapse/_pydantic_compat.py | 64
-rwxr-xr-x  synapse/_scripts/generate_workers_map.py | 4
-rwxr-xr-x  synapse/_scripts/hash_password.py | 4
-rw-r--r--  synapse/_scripts/review_recent_signups.py | 14
-rwxr-xr-x  synapse/_scripts/synapse_port_db.py | 68
-rwxr-xr-x  synapse/_scripts/synctl.py | 6
-rw-r--r--  synapse/api/auth/__init__.py | 4
-rw-r--r--  synapse/api/auth/msc3861_delegated.py | 291
-rw-r--r--  synapse/api/auth_blocking.py | 20
-rw-r--r--  synapse/api/constants.py | 36
-rw-r--r--  synapse/api/errors.py | 27
-rw-r--r--  synapse/api/ratelimiting.py | 49
-rw-r--r--  synapse/api/room_versions.py | 95
-rw-r--r--  synapse/api/urls.py | 45
-rw-r--r--  synapse/app/generic_worker.py | 26
-rw-r--r--  synapse/app/homeserver.py | 24
-rw-r--r--  synapse/app/phone_stats_home.py | 40
-rw-r--r--  synapse/appservice/__init__.py | 2
-rw-r--r--  synapse/appservice/scheduler.py | 37
-rw-r--r--  synapse/config/_base.py | 57
-rw-r--r--  synapse/config/_base.pyi | 17
-rw-r--r--  synapse/config/_util.py | 10
-rw-r--r--  synapse/config/appservice.py | 13
-rw-r--r--  synapse/config/captcha.py | 14
-rw-r--r--  synapse/config/cas.py | 111
-rw-r--r--  synapse/config/emailconfig.py | 366
-rw-r--r--  synapse/config/experimental.py | 180
-rw-r--r--  synapse/config/federation.py | 16
-rw-r--r--  synapse/config/homeserver.py | 8
-rw-r--r--  synapse/config/jwt.py | 2
-rw-r--r--  synapse/config/key.py | 64
-rw-r--r--  synapse/config/logger.py | 27
-rw-r--r--  synapse/config/oidc.py | 59
-rw-r--r--  synapse/config/push.py | 14
-rw-r--r--  synapse/config/ratelimiting.py | 12
-rw-r--r--  synapse/config/redis.py | 27
-rw-r--r--  synapse/config/registration.py | 37
-rw-r--r--  synapse/config/repository.py | 6
-rw-r--r--  synapse/config/room_directory.py | 4
-rw-r--r--  synapse/config/saml2.py | 248
-rw-r--r--  synapse/config/server.py | 107
-rw-r--r--  synapse/config/sso.py | 15
-rw-r--r--  synapse/config/tls.py | 3
-rw-r--r--  synapse/config/user_directory.py | 3
-rw-r--r--  synapse/config/user_types.py | 44
-rw-r--r--  synapse/config/voip.py | 23
-rw-r--r--  synapse/config/workers.py | 58
-rw-r--r--  synapse/crypto/keyring.py | 2
-rw-r--r--  synapse/event_auth.py | 49
-rw-r--r--  synapse/events/__init__.py | 28
-rw-r--r--  synapse/events/auto_accept_invites.py | 85
-rw-r--r--  synapse/events/builder.py | 55
-rw-r--r--  synapse/events/presence_router.py | 2
-rw-r--r--  synapse/events/snapshot.py | 16
-rw-r--r--  synapse/events/utils.py | 5
-rw-r--r--  synapse/events/validator.py | 14
-rw-r--r--  synapse/federation/__init__.py | 3
-rw-r--r--  synapse/federation/federation_base.py | 46
-rw-r--r--  synapse/federation/federation_client.py | 70
-rw-r--r--  synapse/federation/federation_server.py | 27
-rw-r--r--  synapse/federation/persistence.py | 2
-rw-r--r--  synapse/federation/sender/__init__.py | 229
-rw-r--r--  synapse/federation/sender/per_destination_queue.py | 25
-rw-r--r--  synapse/federation/transport/client.py | 27
-rw-r--r--  synapse/federation/transport/server/__init__.py | 4
-rw-r--r--  synapse/federation/transport/server/_base.py | 2
-rw-r--r--  synapse/federation/transport/server/federation.py | 7
-rw-r--r--  synapse/federation/units.py | 28
-rw-r--r--  synapse/handlers/account.py | 8
-rw-r--r--  synapse/handlers/account_data.py | 4
-rw-r--r--  synapse/handlers/account_validity.py | 141
-rw-r--r--  synapse/handlers/admin.py | 215
-rw-r--r--  synapse/handlers/appservice.py | 8
-rw-r--r--  synapse/handlers/auth.py | 177
-rw-r--r--  synapse/handlers/cas.py | 412
-rw-r--r--  synapse/handlers/deactivate_account.py | 39
-rw-r--r--  synapse/handlers/delayed_events.py | 545
-rw-r--r--  synapse/handlers/device.py | 355
-rw-r--r--  synapse/handlers/directory.py | 18
-rw-r--r--  synapse/handlers/e2e_keys.py | 97
-rw-r--r--  synapse/handlers/e2e_room_keys.py | 4
-rw-r--r--  synapse/handlers/federation.py | 69
-rw-r--r--  synapse/handlers/federation_event.py | 23
-rw-r--r--  synapse/handlers/identity.py | 811
-rw-r--r--  synapse/handlers/jwt.py | 16
-rw-r--r--  synapse/handlers/message.py | 86
-rw-r--r--  synapse/handlers/oidc.py | 78
-rw-r--r--  synapse/handlers/pagination.py | 63
-rw-r--r--  synapse/handlers/presence.py | 13
-rw-r--r--  synapse/handlers/profile.py | 191
-rw-r--r--  synapse/handlers/register.py | 131
-rw-r--r--  synapse/handlers/relations.py | 14
-rw-r--r--  synapse/handlers/room.py | 65
-rw-r--r--  synapse/handlers/room_member.py | 281
-rw-r--r--  synapse/handlers/room_policy.py | 96
-rw-r--r--  synapse/handlers/room_summary.py | 69
-rw-r--r--  synapse/handlers/saml.py | 524
-rw-r--r--  synapse/handlers/search.py | 6
-rw-r--r--  synapse/handlers/send_email.py | 230
-rw-r--r--  synapse/handlers/set_password.py | 18
-rw-r--r--  synapse/handlers/sliding_sync.py | 3158
-rw-r--r--  synapse/handlers/sliding_sync/__init__.py | 1691
-rw-r--r--  synapse/handlers/sliding_sync/extensions.py | 879
-rw-r--r--  synapse/handlers/sliding_sync/room_lists.py | 2304
-rw-r--r--  synapse/handlers/sliding_sync/store.py | 128
-rw-r--r--  synapse/handlers/sso.py | 44
-rw-r--r--  synapse/handlers/sync.py | 311
-rw-r--r--  synapse/handlers/ui_auth/checkers.py | 102
-rw-r--r--  synapse/handlers/user_directory.py | 21
-rw-r--r--  synapse/handlers/worker_lock.py | 11
-rw-r--r--  synapse/http/client.py | 42
-rw-r--r--  synapse/http/matrixfederationclient.py | 20
-rw-r--r--  synapse/http/proxy.py | 40
-rw-r--r--  synapse/http/proxyagent.py | 31
-rw-r--r--  synapse/http/replicationagent.py | 4
-rw-r--r--  synapse/http/server.py | 19
-rw-r--r--  synapse/http/servlet.py | 25
-rw-r--r--  synapse/http/site.py | 43
-rw-r--r--  synapse/logging/_remote.py | 4
-rw-r--r--  synapse/logging/_terse_json.py | 1
-rw-r--r--  synapse/logging/context.py | 48
-rw-r--r--  synapse/logging/filter.py | 3
-rw-r--r--  synapse/logging/opentracing.py | 26
-rw-r--r--  synapse/logging/scopecontextmanager.py | 19
-rw-r--r--  synapse/media/_base.py | 261
-rw-r--r--  synapse/media/media_repository.py | 70
-rw-r--r--  synapse/media/media_storage.py | 103
-rw-r--r--  synapse/media/storage_provider.py | 5
-rw-r--r--  synapse/media/thumbnailer.py | 67
-rw-r--r--  synapse/media/url_previewer.py | 17
-rw-r--r--  synapse/metrics/__init__.py | 10
-rw-r--r--  synapse/metrics/background_process_metrics.py | 2
-rw-r--r--  synapse/metrics/jemalloc.py | 3
-rw-r--r--  synapse/module_api/__init__.py | 181
-rw-r--r--  synapse/module_api/callbacks/__init__.py | 8
-rw-r--r--  synapse/module_api/callbacks/media_repository_callbacks.py | 76
-rw-r--r--  synapse/module_api/callbacks/ratelimit_callbacks.py | 74
-rw-r--r--  synapse/module_api/callbacks/spamchecker_callbacks.py | 196
-rw-r--r--  synapse/module_api/callbacks/third_party_event_rules_callbacks.py | 145
-rw-r--r--  synapse/notifier.py | 176
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py | 31
-rw-r--r--  synapse/push/emailpusher.py | 331
-rw-r--r--  synapse/push/httppusher.py | 22
-rw-r--r--  synapse/push/mailer.py | 1003
-rw-r--r--  synapse/push/push_tools.py | 8
-rw-r--r--  synapse/push/push_types.py | 4
-rw-r--r--  synapse/push/pusher.py | 38
-rw-r--r--  synapse/push/pusherpool.py | 7
-rw-r--r--  synapse/replication/http/__init__.py | 4
-rw-r--r--  synapse/replication/http/_base.py | 6
-rw-r--r--  synapse/replication/http/delayed_events.py | 62
-rw-r--r--  synapse/replication/http/federation.py | 4
-rw-r--r--  synapse/replication/http/push.py | 7
-rw-r--r--  synapse/replication/tcp/client.py | 4
-rw-r--r--  synapse/replication/tcp/commands.py | 3
-rw-r--r--  synapse/replication/tcp/handler.py | 4
-rw-r--r--  synapse/replication/tcp/protocol.py | 1
-rw-r--r--  synapse/replication/tcp/resource.py | 5
-rw-r--r--  synapse/replication/tcp/streams/_base.py | 2
-rw-r--r--  synapse/replication/tcp/streams/events.py | 6
-rw-r--r--  synapse/rest/__init__.py | 9
-rw-r--r--  synapse/rest/admin/__init__.py | 52
-rw-r--r--  synapse/rest/admin/devices.py | 37
-rw-r--r--  synapse/rest/admin/event_reports.py | 9
-rw-r--r--  synapse/rest/admin/experimental_features.py | 3
-rw-r--r--  synapse/rest/admin/registration_tokens.py | 3
-rw-r--r--  synapse/rest/admin/rooms.py | 20
-rw-r--r--  synapse/rest/admin/scheduled_tasks.py | 70
-rw-r--r--  synapse/rest/admin/users.py | 231
-rw-r--r--  synapse/rest/client/_base.py | 4
-rw-r--r--  synapse/rest/client/account.py | 612
-rw-r--r--  synapse/rest/client/account_data.py | 6
-rw-r--r--  synapse/rest/client/account_validity.py | 4
-rw-r--r--  synapse/rest/client/appservice_ping.py | 7
-rw-r--r--  synapse/rest/client/auth.py | 21
-rw-r--r--  synapse/rest/client/auth_metadata.py (renamed from synapse/rest/client/auth_issuer.py) | 47
-rw-r--r--  synapse/rest/client/capabilities.py | 26
-rw-r--r--  synapse/rest/client/delayed_events.py | 111
-rw-r--r--  synapse/rest/client/devices.py | 88
-rw-r--r--  synapse/rest/client/directory.py | 12
-rw-r--r--  synapse/rest/client/events.py | 1
-rw-r--r--  synapse/rest/client/keys.py | 46
-rw-r--r--  synapse/rest/client/knock.py | 13
-rw-r--r--  synapse/rest/client/login.py | 67
-rw-r--r--  synapse/rest/client/media.py | 15
-rw-r--r--  synapse/rest/client/presence.py | 26
-rw-r--r--  synapse/rest/client/profile.py | 204
-rw-r--r--  synapse/rest/client/pusher.py | 16
-rw-r--r--  synapse/rest/client/receipts.py | 4
-rw-r--r--  synapse/rest/client/register.py | 410
-rw-r--r--  synapse/rest/client/rendezvous.py | 54
-rw-r--r--  synapse/rest/client/reporting.py | 29
-rw-r--r--  synapse/rest/client/room.py | 144
-rw-r--r--  synapse/rest/client/sync.py | 102
-rw-r--r--  synapse/rest/client/tags.py | 7
-rw-r--r--  synapse/rest/client/transactions.py | 7
-rw-r--r--  synapse/rest/client/versions.py | 19
-rw-r--r--  synapse/rest/key/v2/remote_key_resource.py | 16
-rw-r--r--  synapse/rest/media/config_resource.py | 11
-rw-r--r--  synapse/rest/media/upload_resource.py | 26
-rw-r--r--  synapse/rest/synapse/client/__init__.py | 15
-rw-r--r--  synapse/rest/synapse/client/password_reset.py | 129
-rw-r--r--  synapse/rest/synapse/client/pick_idp.py | 29
-rw-r--r--  synapse/rest/synapse/client/saml2/__init__.py | 42
-rw-r--r--  synapse/rest/synapse/client/saml2/metadata_resource.py | 46
-rw-r--r--  synapse/rest/synapse/client/saml2/response_resource.py | 52
-rw-r--r--  synapse/rest/synapse/client/unsubscribe.py | 88
-rw-r--r--  synapse/rest/well_known.py | 46
-rw-r--r--  synapse/server.py | 64
-rw-r--r--  synapse/server_notices/resource_limits_server_notices.py | 4
-rw-r--r--  synapse/state/__init__.py | 73
-rw-r--r--  synapse/state/v2.py | 4
-rw-r--r--  synapse/storage/_base.py | 22
-rw-r--r--  synapse/storage/background_updates.py | 50
-rw-r--r--  synapse/storage/controllers/persist_events.py | 52
-rw-r--r--  synapse/storage/controllers/purge_events.py | 337
-rw-r--r--  synapse/storage/controllers/state.py | 56
-rw-r--r--  synapse/storage/database.py | 99
-rw-r--r--  synapse/storage/databases/__init__.py | 10
-rw-r--r--  synapse/storage/databases/main/__init__.py | 6
-rw-r--r--  synapse/storage/databases/main/account_data.py | 88
-rw-r--r--  synapse/storage/databases/main/cache.py | 72
-rw-r--r--  synapse/storage/databases/main/client_ips.py | 33
-rw-r--r--  synapse/storage/databases/main/delayed_events.py | 549
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py | 8
-rw-r--r--  synapse/storage/databases/main/devices.py | 27
-rw-r--r--  synapse/storage/databases/main/e2e_room_keys.py | 42
-rw-r--r--  synapse/storage/databases/main/end_to_end_keys.py | 165
-rw-r--r--  synapse/storage/databases/main/event_federation.py | 16
-rw-r--r--  synapse/storage/databases/main/event_push_actions.py | 95
-rw-r--r--  synapse/storage/databases/main/events.py | 1055
-rw-r--r--  synapse/storage/databases/main/events_bg_updates.py | 1425
-rw-r--r--  synapse/storage/databases/main/events_worker.py | 248
-rw-r--r--  synapse/storage/databases/main/media_repository.py | 110
-rw-r--r--  synapse/storage/databases/main/monthly_active_users.py | 93
-rw-r--r--  synapse/storage/databases/main/profile.py | 346
-rw-r--r--  synapse/storage/databases/main/purge_events.py | 50
-rw-r--r--  synapse/storage/databases/main/push_rule.py | 1
-rw-r--r--  synapse/storage/databases/main/receipts.py | 309
-rw-r--r--  synapse/storage/databases/main/registration.py | 742
-rw-r--r--  synapse/storage/databases/main/room.py | 345
-rw-r--r--  synapse/storage/databases/main/roommember.py | 534
-rw-r--r--  synapse/storage/databases/main/search.py | 8
-rw-r--r--  synapse/storage/databases/main/sliding_sync.py | 603
-rw-r--r--  synapse/storage/databases/main/state.py | 42
-rw-r--r--  synapse/storage/databases/main/state_deltas.py | 176
-rw-r--r--  synapse/storage/databases/main/stats.py | 8
-rw-r--r--  synapse/storage/databases/main/stream.py | 603
-rw-r--r--  synapse/storage/databases/main/tags.py | 70
-rw-r--r--  synapse/storage/databases/main/transactions.py | 4
-rw-r--r--  synapse/storage/databases/main/user_directory.py | 40
-rw-r--r--  synapse/storage/databases/state/bg_updates.py | 17
-rw-r--r--  synapse/storage/databases/state/deletion.py | 561
-rw-r--r--  synapse/storage/databases/state/store.py | 172
-rw-r--r--  synapse/storage/engines/_base.py | 5
-rw-r--r--  synapse/storage/engines/postgres.py | 11
-rw-r--r--  synapse/storage/engines/sqlite.py | 6
-rw-r--r--  synapse/storage/invite_rule.py | 110
-rw-r--r--  synapse/storage/prepare_database.py | 4
-rw-r--r--  synapse/storage/roommember.py | 28
-rw-r--r--  synapse/storage/schema/__init__.py | 30
-rw-r--r--  synapse/storage/schema/main/delta/25/fts.py | 3
-rw-r--r--  synapse/storage/schema/main/delta/27/ts.py | 3
-rw-r--r--  synapse/storage/schema/main/delta/31/search_update.py | 3
-rw-r--r--  synapse/storage/schema/main/delta/33/event_fields.py | 3
-rw-r--r--  synapse/storage/schema/main/delta/56/unique_user_filter_index.py | 4
-rw-r--r--  synapse/storage/schema/main/delta/61/03recreate_min_depth.py | 1
-rw-r--r--  synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py | 1
-rw-r--r--  synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py | 1
-rw-r--r--  synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py | 1
-rw-r--r--  synapse/storage/schema/main/delta/78/03event_extremities_constraints.py | 1
-rw-r--r--  synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql | 15
-rw-r--r--  synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql | 169
-rw-r--r--  synapse/storage/schema/main/delta/87/02_per_connection_state.sql | 81
-rw-r--r--  synapse/storage/schema/main/delta/87/03_current_state_index.sql | 19
-rw-r--r--  synapse/storage/schema/main/delta/88/01_add_delayed_events.sql | 43
-rw-r--r--  synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql | 15
-rw-r--r--  synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql | 21
-rw-r--r--  synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql | 18
-rw-r--r--  synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql | 18
-rw-r--r--  synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres | 19
-rw-r--r--  synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite | 19
-rw-r--r--  synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql | 20
-rw-r--r--  synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql | 17
-rw-r--r--  synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql | 15
-rw-r--r--  synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql | 16
-rw-r--r--  synapse/storage/schema/main/delta/91/01_media_hash.sql | 28
-rw-r--r--  synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres | 16
-rw-r--r--  synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite | 16
-rw-r--r--  synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql | 17
-rw-r--r--  synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql | 16
-rw-r--r--  synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql | 17
-rw-r--r--  synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql | 39
-rw-r--r--  synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql | 16
-rw-r--r--  synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql | 15
-rw-r--r--  synapse/storage/types.py | 3
-rw-r--r--  synapse/synapse_rust/events.pyi | 28
-rw-r--r--  synapse/synapse_rust/push.pyi | 2
-rw-r--r--  synapse/types/__init__.py | 13
-rw-r--r--  synapse/types/handlers/__init__.py | 364
-rw-r--r--  synapse/types/handlers/policy_server.py | 16
-rw-r--r--  synapse/types/handlers/sliding_sync.py | 875
-rw-r--r--  synapse/types/rest/__init__.py | 9
-rw-r--r--  synapse/types/rest/client/__init__.py | 75
-rw-r--r--  synapse/types/state.py | 39
-rw-r--r--  synapse/types/storage/__init__.py | 56
-rw-r--r--  synapse/util/async_helpers.py | 154
-rw-r--r--  synapse/util/caches/dictionary_cache.py | 13
-rw-r--r--  synapse/util/caches/expiringcache.py | 3
-rw-r--r--  synapse/util/caches/lrucache.py | 3
-rw-r--r--  synapse/util/caches/response_cache.py | 33
-rw-r--r--  synapse/util/caches/stream_change_cache.py | 23
-rw-r--r--  synapse/util/events.py | 29
-rw-r--r--  synapse/util/iterutils.py | 7
-rw-r--r--  synapse/util/linked_list.py | 3
-rw-r--r--  synapse/util/macaroons.py | 3
-rw-r--r--  synapse/util/metrics.py | 15
-rw-r--r--  synapse/util/msisdn.py | 51
-rw-r--r--  synapse/util/patch_inline_callbacks.py | 6
-rw-r--r--  synapse/util/ratelimitutils.py | 2
-rw-r--r--  synapse/util/rust.py | 87
-rw-r--r--  synapse/util/stringutils.py | 12
-rw-r--r--  synapse/util/task_scheduler.py | 155
-rw-r--r--  synapse/util/threepids.py | 123
-rw-r--r--  synapse/util/wheel_timer.py | 6
-rw-r--r--  synapse/visibility.py | 72
-rw-r--r--  synmark/__init__.py | 4
-rw-r--r--  synmark/__main__.py | 6
-rwxr-xr-x  test.sh | 2
-rw-r--r--  tests/api/test_auth.py | 17
-rw-r--r--  tests/api/test_ratelimiting.py | 54
-rw-r--r--  tests/api/test_urls.py | 55
-rw-r--r--  tests/appservice/test_scheduler.py | 40
-rw-r--r--  tests/config/test_api.py | 3
-rw-r--r--  tests/config/test_appservice.py | 7
-rw-r--r--  tests/config/test_cache.py | 3
-rw-r--r--  tests/config/test_database.py | 5
-rw-r--r--  tests/config/test_load.py | 194
-rw-r--r--  tests/config/test_oauth_delegation.py | 16
-rw-r--r--  tests/config/test_room_directory.py | 5
-rw-r--r--  tests/config/test_server.py | 23
-rw-r--r--  tests/config/test_workers.py | 2
-rw-r--r--  tests/config/utils.py | 5
-rw-r--r--  tests/events/test_auto_accept_invites.py | 186
-rw-r--r--  tests/events/test_utils.py | 3
-rw-r--r--  tests/federation/test_complexity.py | 4
-rw-r--r--  tests/federation/test_federation_catch_up.py | 5
-rw-r--r--  tests/federation/test_federation_devices.py | 161
-rw-r--r--  tests/federation/test_federation_media.py | 41
-rw-r--r--  tests/federation/test_federation_out_of_band_membership.py | 671
-rw-r--r--  tests/federation/test_federation_sender.py | 134
-rw-r--r--  tests/federation/test_federation_server.py | 247
-rw-r--r--  tests/handlers/test_admin.py | 1
-rw-r--r--  tests/handlers/test_appservice.py | 15
-rw-r--r--  tests/handlers/test_cas.py | 239
-rw-r--r--  tests/handlers/test_directory.py | 1
-rw-r--r--  tests/handlers/test_e2e_keys.py | 298
-rw-r--r--  tests/handlers/test_federation.py | 33
-rw-r--r--  tests/handlers/test_federation_event.py | 27
-rw-r--r--  tests/handlers/test_oauth_delegation.py | 188
-rw-r--r--  tests/handlers/test_oidc.py | 291
-rw-r--r--  tests/handlers/test_password_providers.py | 71
-rw-r--r--  tests/handlers/test_presence.py | 217
-rw-r--r--  tests/handlers/test_profile.py | 1
-rw-r--r--  tests/handlers/test_register.py | 58
-rw-r--r--  tests/handlers/test_room_list.py | 6
-rw-r--r--  tests/handlers/test_room_member.py | 224
-rw-r--r--  tests/handlers/test_room_policy.py | 226
-rw-r--r--  tests/handlers/test_room_summary.py | 48
-rw-r--r--  tests/handlers/test_saml.py | 381
-rw-r--r--  tests/handlers/test_send_email.py | 230
-rw-r--r--  tests/handlers/test_sliding_sync.py | 4027
-rw-r--r--  tests/handlers/test_sync.py | 317
-rw-r--r--  tests/handlers/test_user_directory.py | 101
-rw-r--r--  tests/handlers/test_worker_lock.py | 30
-rw-r--r--  tests/http/federation/test_srv_resolver.py | 12
-rw-r--r--  tests/http/server/_base.py | 107
-rw-r--r--  tests/http/test_client.py | 46
-rw-r--r--  tests/http/test_matrixfederationclient.py | 139
-rw-r--r--  tests/http/test_proxy.py | 32
-rw-r--r--  tests/http/test_proxyagent.py | 6
-rw-r--r--  tests/http/test_servlet.py | 2
-rw-r--r--  tests/http/test_site.py | 53
-rw-r--r--  tests/logging/test_terse_json.py | 1
-rw-r--r--  tests/media/test_media_retention.py | 11
-rw-r--r--  tests/media/test_media_storage.py | 295
-rw-r--r--  tests/media/test_oembed.py | 3
-rw-r--r--  tests/metrics/test_metrics.py | 3
-rw-r--r--  tests/metrics/test_phone_home_stats.py | 263
-rw-r--r--  tests/module_api/test_account_data_manager.py | 4
-rw-r--r--  tests/module_api/test_api.py | 14
-rw-r--r--  tests/module_api/test_spamchecker.py | 244
-rw-r--r--  tests/push/test_bulk_push_rule_evaluator.py | 8
-rw-r--r--  tests/push/test_email.py | 522
-rw-r--r--  tests/push/test_http.py | 162
-rw-r--r--  tests/push/test_push_rule_evaluator.py | 4
-rw-r--r--  tests/replication/http/test__base.py | 4
-rw-r--r--  tests/replication/tcp/streams/test_events.py | 2
-rw-r--r--  tests/replication/test_federation_sender_shard.py | 110
-rw-r--r--  tests/replication/test_multi_media_repo.py | 4
-rw-r--r--  tests/rest/admin/test_admin.py | 30
-rw-r--r--  tests/rest/admin/test_device.py | 64
-rw-r--r--  tests/rest/admin/test_event_reports.py | 35
-rw-r--r--  tests/rest/admin/test_federation.py | 2
-rw-r--r--  tests/rest/admin/test_media.py | 76
-rw-r--r--  tests/rest/admin/test_room.py | 101
-rw-r--r--  tests/rest/admin/test_scheduled_tasks.py | 192
-rw-r--r--  tests/rest/admin/test_server_notice.py | 12
-rw-r--r--  tests/rest/admin/test_statistics.py | 2
-rw-r--r--  tests/rest/admin/test_user.py | 1329
-rw-r--r--  tests/rest/client/sliding_sync/test_connection_tracking.py | 18
-rw-r--r--  tests/rest/client/sliding_sync/test_extension_account_data.py | 635
-rw-r--r--  tests/rest/client/sliding_sync/test_extension_e2ee.py | 18
-rw-r--r--  tests/rest/client/sliding_sync/test_extension_receipts.py | 255
-rw-r--r--  tests/rest/client/sliding_sync/test_extension_to_device.py | 17
-rw-r--r--  tests/rest/client/sliding_sync/test_extension_typing.py | 18
-rw-r--r--  tests/rest/client/sliding_sync/test_extensions.py | 31
-rw-r--r--  tests/rest/client/sliding_sync/test_lists_filters.py | 1975
-rw-r--r--  tests/rest/client/sliding_sync/test_room_subscriptions.py | 18
-rw-r--r--  tests/rest/client/sliding_sync/test_rooms_invites.py | 18
-rw-r--r--  tests/rest/client/sliding_sync/test_rooms_meta.py | 730
-rw-r--r--  tests/rest/client/sliding_sync/test_rooms_required_state.py | 1063
-rw-r--r--  tests/rest/client/sliding_sync/test_rooms_timeline.py | 208
-rw-r--r--  tests/rest/client/sliding_sync/test_sliding_sync.py | 1294
-rw-r--r--  tests/rest/client/test_account.py | 1264
-rw-r--r--  tests/rest/client/test_auth_issuer.py | 59
-rw-r--r--  tests/rest/client/test_auth_metadata.py | 140
-rw-r--r--  tests/rest/client/test_capabilities.py | 71
-rw-r--r--  tests/rest/client/test_delayed_events.py | 610
-rw-r--r--  tests/rest/client/test_devices.py | 181
-rw-r--r--  tests/rest/client/test_events.py | 2
-rw-r--r--  tests/rest/client/test_identity.py | 67
-rw-r--r--  tests/rest/client/test_keys.py | 12
-rw-r--r--  tests/rest/client/test_login.py | 404
-rw-r--r--  tests/rest/client/test_login_token_request.py | 1
-rw-r--r--  tests/rest/client/test_media.py | 183
-rw-r--r--  tests/rest/client/test_models.py | 89
-rw-r--r--  tests/rest/client/test_owned_state.py | 308
-rw-r--r--  tests/rest/client/test_presence.py | 52
-rw-r--r--  tests/rest/client/test_profile.py | 297
-rw-r--r--  tests/rest/client/test_register.py | 397
-rw-r--r--  tests/rest/client/test_rendezvous.py | 15
-rw-r--r--  tests/rest/client/test_reporting.py | 31
-rw-r--r--  tests/rest/client/test_rooms.py | 720
-rw-r--r--  tests/rest/client/test_shadow_banned.py | 29
-rw-r--r--  tests/rest/client/test_sync.py | 31
-rw-r--r--  tests/rest/client/test_tags.py | 95
-rw-r--r--  tests/rest/client/test_third_party_rules.py | 159
-rw-r--r--  tests/rest/client/utils.py | 74
-rw-r--r--  tests/rest/media/test_domain_blocking.py | 5
-rw-r--r--  tests/rest/media/test_url_preview.py | 5
-rw-r--r--  tests/rest/test_well_known.py | 44
-rw-r--r--  tests/server.py | 71
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py | 2
-rw-r--r--  tests/storage/databases/main/test_events_worker.py | 49
-rw-r--r--  tests/storage/test__base.py | 18
-rw-r--r--  tests/storage/test_account_data.py | 15
-rw-r--r--  tests/storage/test_base.py | 2
-rw-r--r--  tests/storage/test_devices.py | 6
-rw-r--r--  tests/storage/test_event_federation.py | 2
-rw-r--r--  tests/storage/test_event_push_actions.py | 20
-rw-r--r--  tests/storage/test_events.py | 3
-rw-r--r--  tests/storage/test_events_bg_updates.py | 157
-rw-r--r--  tests/storage/test_invite_rule.py | 167
-rw-r--r--  tests/storage/test_monthly_active_users.py | 171
-rw-r--r--  tests/storage/test_purge.py | 329
-rw-r--r--  tests/storage/test_registration.py | 34
-rw-r--r--  tests/storage/test_roommember.py | 174
-rw-r--r--  tests/storage/test_sliding_sync_tables.py | 5119
-rw-r--r--  tests/storage/test_state_deletion.py | 475
-rw-r--r--  tests/storage/test_stream.py | 114
-rw-r--r--  tests/test_event_auth.py | 14
-rw-r--r--  tests/test_federation.py | 376
-rw-r--r--  tests/test_mau.py | 3
-rw-r--r--  tests/test_server.py | 41
-rw-r--r--  tests/test_state.py | 20
-rw-r--r--  tests/test_types.py | 4
-rw-r--r--  tests/test_utils/__init__.py | 22
-rw-r--r--  tests/test_utils/logging_setup.py | 2
-rw-r--r--  tests/test_utils/oidc.py | 19
-rw-r--r--  tests/unittest.py | 15
-rw-r--r--  tests/util/test_async_helpers.py | 123
-rw-r--r--  tests/util/test_check_dependencies.py | 22
-rw-r--r--  tests/util/test_linearizer.py | 4
-rw-r--r--  tests/util/test_stream_change_cache.py | 43
-rw-r--r--  tests/util/test_stringutils.py | 28
-rw-r--r--  tests/util/test_task_scheduler.py | 53
-rw-r--r--  tests/util/test_threepids.py | 55
-rw-r--r--  tests/util/test_wheel_timer.py | 50
-rw-r--r--  tests/utils.py | 26
-rw-r--r--  tox.ini | 2
627 files changed, 61856 insertions, 30866 deletions
diff --git a/.ci/before_build_wheel.sh b/.ci/before_build_wheel.sh
new file mode 100644
index 0000000000..56108dcd60
--- /dev/null
+++ b/.ci/before_build_wheel.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -xeu
+
+# On 32-bit Linux platforms, we need libatomic1 to use rustup
+if command -v yum &> /dev/null; then
+    yum install -y libatomic
+fi
+
+# Install a Rust toolchain
+curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal
diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py
index 92bd192fd1..5249acdc5d 100755
--- a/.ci/scripts/calculate_jobs.py
+++ b/.ci/scripts/calculate_jobs.py
@@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 # First calculate the various trial jobs.
 #
 # For PRs, we only run each type of test with the oldest Python version supported (which
-# is Python 3.8 right now)
+# is Python 3.9 right now)
 
 trial_sqlite_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "sqlite",
         "extras": "all",
     }
@@ -53,14 +53,14 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.9", "3.10", "3.11", "3.12")
+        for version in ("3.10", "3.11", "3.12", "3.13")
     )
 
 trial_postgres_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "postgres",
-        "postgres-version": "11",
+        "postgres-version": "13",
         "extras": "all",
     }
 ]
@@ -68,16 +68,16 @@ trial_postgres_tests = [
 if not IS_PR:
     trial_postgres_tests.append(
         {
-            "python-version": "3.12",
+            "python-version": "3.13",
             "database": "postgres",
-            "postgres-version": "16",
+            "postgres-version": "17",
             "extras": "all",
         }
     )
 
 trial_no_extra_tests = [
     {
-        "python-version": "3.8",
+        "python-version": "3.9",
         "database": "sqlite",
         "extras": "",
     }
 ]
@@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)
 # First calculate the various sytest jobs.
 #
-# For each type of test we only run on focal on PRs
+# For each type of test we only run on bullseye on PRs
 
 sytest_tests = [
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "postgres",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "multi-postgres",
         "workers": "workers",
     },
     {
-        "sytest-tag": "focal",
+        "sytest-tag": "bullseye",
         "postgres": "multi-postgres",
         "workers": "workers",
         "reactor": "asyncio",
@@ -127,11 +127,11 @@ if not IS_PR:
     sytest_tests.extend(
         [
             {
-                "sytest-tag": "focal",
+                "sytest-tag": "bullseye",
                 "reactor": "asyncio",
             },
             {
-                "sytest-tag": "focal",
+                "sytest-tag": "bullseye",
                 "postgres": "postgres",
                 "reactor": "asyncio",
             },
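As context for the matrix changes above (this note is not part of the diff): calculate_jobs.py hands the computed job matrices to later workflow steps through a set_output helper. A minimal sketch of how such a helper is typically written for GitHub Actions, assuming the real script uses the same name and a JSON encoding:

import json
import os

def set_output(key: str, value) -> None:
    # GitHub Actions exposes a file path in $GITHUB_OUTPUT; appending
    # "key=value" lines to it publishes step outputs that later steps can
    # read, e.g. via fromJson() in a matrix definition.
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{key}={json.dumps(value)}\n")

set_output("trial_test_matrix", [{"python-version": "3.9", "database": "sqlite"}])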
diff --git a/.ci/scripts/check_lockfile.py b/.ci/scripts/check_lockfile.py
index 19cec7ddd6..46d3952b4c 100755
--- a/.ci/scripts/check_lockfile.py
+++ b/.ci/scripts/check_lockfile.py
@@ -11,12 +11,12 @@ with open("poetry.lock", "rb") as f:
 
 try:
     lock_version = lockfile["metadata"]["lock-version"]
-    assert lock_version == "2.0"
+    assert lock_version == "2.1"
 except Exception:
     print(
         """\
-        Lockfile is not version 2.0. You probably need to upgrade poetry on your local box
-        and re-run `poetry lock --no-update`. See the Poetry cheat sheet at
+        Lockfile is not version 2.1. You probably need to upgrade poetry on your local box
+        and re-run `poetry lock`. See the Poetry cheat sheet at
         https://element-hq.github.io/synapse/develop/development/dependencies.html
         """
     )
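For orientation: the lock-version checked above lives in the [metadata] table of poetry.lock, which is TOML. A rough sketch of the same check, assuming Python 3.11+ for the standard-library tomllib (older interpreters would need a third-party TOML parser):

import tomllib  # standard library since Python 3.11

with open("poetry.lock", "rb") as f:
    lockfile = tomllib.load(f)

# Poetry 2.x writes lockfiles with lock-version 2.1; regenerate via `poetry lock`.
assert lockfile["metadata"]["lock-version"] == "2.1"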
diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh
index 580f87bbdf..3589be26f8 100755
--- a/.ci/scripts/prepare_old_deps.sh
+++ b/.ci/scripts/prepare_old_deps.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# this script is run by GitHub Actions in a plain `focal` container; it
+# this script is run by GitHub Actions in a plain `jammy` container; it
 # - installs the minimal system requirements, and poetry;
 # - patches the project definition file to refer to old versions only;
 # - creates a venv with these old versions using poetry; and finally
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 07d4f6dfce..f8e60815fa 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -9,5 +9,4 @@
   - End with either a period (.) or an exclamation mark (!).
   - Start with a capital letter.
   - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
-* [ ] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct
-  (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
+* [ ] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters))
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 06aaeb851f..feeadf170d 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -14,26 +14,26 @@ permissions:
   id-token: write # needed for signing the images with GitHub OIDC Token
 
 jobs:
   build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - name: Set up QEMU
         id: qemu
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
        with:
          platforms: arm64
 
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
 
       - name: Inspect builder
         run: docker buildx inspect
 
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.5.0
+        uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
 
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Extract version from pyproject.toml
         # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see
@@ -43,13 +43,13 @@ jobs:
           echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV
 
       - name: Log in to DockerHub
-        uses: docker/login-action@v3
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
 
       - name: Log in to GHCR
-        uses: docker/login-action@v3
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -57,7 +57,7 @@ jobs:
 
       - name: Calculate docker image tag
         id: set-tag
-        uses: docker/metadata-action@master
+        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
         with:
           images: |
             docker.io/matrixdotorg/synapse
@@ -72,7 +72,7 @@ jobs:
 
       - name: Build and push all platforms
         id: build-and-push
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
         with:
           push: true
           labels: |
diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml
index 6d184a21e0..8a06ad6362 100644
--- a/.github/workflows/docs-pr-netlify.yaml
+++ b/.github/workflows/docs-pr-netlify.yaml
@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@bf251b5aa9c2f7eeb574a96ee720e24f801b7c11 # v6
+        uses: dawidd6/action-download-artifact@07ab29fd4a977ae4d2b275087cf67563dfdf0295 # v9
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}
@@ -22,7 +22,7 @@
           path: book
 
       - name: 📤 Deploy to Netlify
-        uses: matrix-org/netlify-pr-preview@v3
+        uses: matrix-org/netlify-pr-preview@9805cd123fc9a7e421e35340a05e1ebc5dee46b5 # v3
         with:
           path: book
           owner: ${{ github.event.workflow_run.head_repository.owner.login }}
diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml
index 07dc301b1a..1f4f79598a 100644
--- a/.github/workflows/docs-pr.yaml
+++ b/.github/workflows/docs-pr.yaml
@@ -13,7 +13,7 @@ jobs:
     name: GitHub Pages
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           # Fetch all history so that the schema_versions script works.
           fetch-depth: 0
@@ -24,7 +24,7 @@
           mdbook-version: '0.4.17'
 
       - name: Setup python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: "3.x"
 
@@ -39,7 +39,7 @@
           cp book/welcome_and_overview.html book/index.html
 
       - name: Upload Artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: book
           path: book
@@ -50,7 +50,7 @@
     name: Check links in documentation
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Setup mdbook
         uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 434dcbb6c7..64fb437b42 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -50,7 +50,7 @@
     needs:
       - pre
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           # Fetch all history so that the schema_versions script works.
           fetch-depth: 0
@@ -64,7 +64,7 @@
         run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js
 
       - name: Setup python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: "3.x"
 
@@ -78,6 +78,18 @@
           mdbook build
           cp book/welcome_and_overview.html book/index.html
 
+      - name: Prepare and publish schema files
+        run: |
+          sudo apt-get update && sudo apt-get install -y yq
+          mkdir -p book/schema
+          # Remove developer notice before publishing.
+          rm schema/v*/Do\ not\ edit\ files\ in\ this\ folder
+          # Copy schema files that are independent from current Synapse version.
+          cp -r -t book/schema schema/v*/
+          # Convert config schema from YAML source file to JSON.
+          yq < schema/synapse-config.schema.yaml \
+            > book/schema/synapse-config.schema.json
+
       # Deploy to the target directory.
       - name: Deploy to gh pages
         uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
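The yq step added above only transcodes the config schema from YAML to JSON before publishing. A rough Python equivalent of that conversion, assuming PyYAML is installed and the snippet runs from the repository root after `mkdir -p book/schema`:

import json

import yaml  # PyYAML

with open("schema/synapse-config.schema.yaml") as f:
    schema = yaml.safe_load(f)

# Mirrors `yq < in.yaml > out.json`: same document, JSON encoding.
with open("book/schema/synapse-config.schema.json", "w") as f:
    json.dump(schema, f, indent=2)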
diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml
index f1e35fcd99..923e96a624 100644
--- a/.github/workflows/fix_lint.yaml
+++ b/.github/workflows/fix_lint.yaml
@@ -13,33 +13,30 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@master
+        uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
         with:
           # We use nightly so that `fmt` correctly groups together imports, and
           # clippy correctly fixes up the benchmarks.
           toolchain: nightly-2022-12-01
-          components: rustfmt
-      - uses: Swatinem/rust-cache@v2
+          components: clippy, rustfmt
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
 
       - name: Setup Poetry
-        uses: matrix-org/setup-python-poetry@v1
+        uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
           install-project: "false"
+          poetry-version: "2.1.1"
 
-      - name: Import order (isort)
+      - name: Run ruff check
         continue-on-error: true
-        run: poetry run isort .
+        run: poetry run ruff check --fix .
 
-      - name: Code style (black)
+      - name: Run ruff format
         continue-on-error: true
-        run: poetry run black .
-
-      - name: Semantic checks (ruff)
-        continue-on-error: true
-        run: poetry run ruff --fix .
+        run: poetry run ruff format --quiet .
 
       - run: cargo clippy --all-features --fix -- -D warnings
         continue-on-error: true
@@ -47,6 +44,6 @@ jobs:
       - run: cargo fmt
         continue-on-error: true
 
-      - uses: stefanzweifel/git-auto-commit-action@v5
+      - uses: stefanzweifel/git-auto-commit-action@b863ae1933cb653a53c021fe36dbb774e1fb9403 # v5.2.0
         with:
-          commit_message: "Attempt to fix linting"
+          commit_message: "Attempt to fix linting"
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index b9e9a401b9..ee0dac3beb 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -39,17 +39,17 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
+        uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
 
       # The dev dependencies aren't exposed in the wheel metadata (at least with current
       # poetry-core versions), so we install with poetry.
-      - uses: matrix-org/setup-python-poetry@v1
+      - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
           python-version: "3.x"
-          poetry-version: "1.3.2"
+          poetry-version: "2.1.1"
           extras: "all"
       # Dump installed versions for debugging.
       - run: poetry run pip list > before.txt
@@ -72,11 +72,11 @@
         postgres-version: "14"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
+        uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.postgres-version }}
@@ -86,7 +86,7 @@
           -e POSTGRES_PASSWORD=postgres \
           -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
           postgres:${{ matrix.postgres-version }}
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: "3.x"
       - run: pip install .[all,test]
@@ -132,9 +132,9 @@
       fail-fast: false
       matrix:
         include:
-          - sytest-tag: focal
+          - sytest-tag: bullseye
 
-          - sytest-tag: focal
+          - sytest-tag: bullseye
             postgres: postgres
             workers: workers
             redis: redis
@@ -145,11 +145,11 @@
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
+        uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1)
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
 
       - name: Ensure sytest runs `pip install`
         # Delete the lockfile so sytest will `pip install` rather than `poetry install`
@@ -164,7 +164,7 @@
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -192,15 +192,15 @@
         database: Postgres
 
     steps:
-      - name: Run actions/checkout@v4 for synapse
-        uses: actions/checkout@v4
+      - name: Check out synapse codebase
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           path: synapse
 
       - name: Prepare Complement's Prerequisites
         run: synapse/.ci/scripts/setup_complement_prerequisites.sh
 
-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
         with:
           cache-dependency-path: complement/go.sum
           go-version-file: complement/go.mod
@@ -225,7 +225,7 @@
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml
index 496e536b93..1668ad81d2 100644
--- a/.github/workflows/poetry_lockfile.yaml
+++ b/.github/workflows/poetry_lockfile.yaml
@@ -16,8 +16,8 @@ jobs:
     name: "Check locked dependencies have sdists"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: '3.x'
       - run: pip install tomli
diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml
index 6fbd2ed015..7c8eb446cd 100644
--- a/.github/workflows/push_complement_image.yml
+++ b/.github/workflows/push_complement_image.yml
@@ -33,29 +33,29 @@ jobs:
       packages: write
     steps:
       - name: Checkout specific branch (debug build)
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         if: github.event_name == 'workflow_dispatch'
         with:
           ref: ${{ inputs.branch }}
       - name: Checkout clean copy of develop (scheduled build)
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         if: github.event_name == 'schedule'
         with:
           ref: develop
       - name: Checkout clean copy of master (on-push)
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         if: github.event_name == 'push'
         with:
           ref: master
       - name: Login to registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Work out labels for complement image
         id: meta
-        uses: docker/metadata-action@v5
+        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
         with:
           images: ghcr.io/${{ github.repository }}/complement-synapse
           tags: |
diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml
index 9f0feffd94..572d73e6ad 100644
--- a/.github/workflows/release-artifacts.yml
+++ b/.github/workflows/release-artifacts.yml
@@ -27,8 +27,8 @@ jobs:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: '3.x'
       - id: set-distros
@@ -55,18 +55,18 @@
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           path: src
 
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
         with:
           install: true
 
       - name: Set up docker layer caching
-        uses: actions/cache@v4
+        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -74,7 +74,7 @@
           ${{ runner.os }}-buildx-
 
       - name: Set up python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: '3.x'
@@ -91,10 +91,19 @@
           rm -rf /tmp/.buildx-cache
           mv /tmp/.buildx-cache-new /tmp/.buildx-cache
 
+      - name: Artifact name
+        id: artifact-name
+        # We can't have colons in the upload name of the artifact, so we convert
+        # e.g. `debian:sid` to `sid`.
+        env:
+          DISTRO: ${{ matrix.distro }}
+        run: |
+          echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
+
       - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: debs
+          name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
           path: debs/*
 
   build-wheels:
@@ -102,7 +111,7 @@ build-wheels:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-12]
+        os: [ubuntu-22.04, macos-13]
         arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
@@ -112,29 +121,29 @@
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-12"
+            os: "macos-13"
           # Don't build aarch64 wheels on mac.
-          - os: "macos-12"
+          - os: "macos-13"
             arch: aarch64
           # Don't build aarch64 wheels on PR CI.
           - is_pr: true
             arch: aarch64
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           # setup-python@v4 doesn't impose a default python version. Need to use 3.x
           # here, because `python` on osx points to Python 2.7.
           python-version: "3.x"
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==2.19.1
+        run: python -m pip install cibuildwheel==2.23.0
 
       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
         with:
           platforms: arm64
@@ -144,7 +153,7 @@
 
       - name: Only build a single wheel on PR
         if: startsWith(github.ref, 'refs/pull/')
-        run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+        run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
 
       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
@@ -156,9 +165,9 @@
           CARGO_NET_GIT_FETCH_WITH_CLI: true
           CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
 
-      - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: Wheel
+          name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
           path: ./wheelhouse/*.whl
 
   build-sdist:
@@ -167,8 +176,8 @@ build-sdist:
     if: ${{ !startsWith(github.ref, 'refs/pull/') }}
 
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: '3.10'
@@ -177,7 +186,7 @@
 
       - name: Build sdist
         run: python -m build --sdist
 
-      - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
+      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: Sdist
           path: dist/*.tar.gz
@@ -194,17 +203,23 @@
     runs-on: ubuntu-latest
     steps:
       - name: Download all workflow run artifacts
-        uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
+        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
 
       - name: Build a tarball for the debs
-        run: tar -cvJf debs.tar.xz debs
+        # We need to merge all the debs uploads into one folder, then compress
+        # that.
+        run: |
+          mkdir debs
+          mv debs*/* debs/
+          tar -cvJf debs.tar.xz debs
 
       - name: Attach to release
-        uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
+        # Pinned to work around https://github.com/softprops/action-gh-release/issues/445
+        uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v0.1.15
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           files: |
             Sdist/*
-            Wheel/*
+            Wheel*/*
             debs.tar.xz
           # if it's not already published, keep the release as a draft.
           draft: true
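The new artifact-name step exists because artifact upload names may not contain colons; the shell expansion ${DISTRO#*:} strips everything up to and including the first colon. A Python rendering of that transformation, for illustration only:

def artifact_name(distro: str) -> str:
    # Equivalent of the shell expansion ${DISTRO#*:}: remove the shortest
    # prefix ending in ":", so "debian:sid" becomes "sid". Strings without
    # a colon pass through unchanged.
    return distro.split(":", 1)[-1]

assert artifact_name("debian:sid") == "sid"
assert artifact_name("sid") == "sid"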
diff --git a/.github/workflows/schema.yaml b/.github/workflows/schema.yaml
new file mode 100644
index 0000000000..41f3cd01be
--- /dev/null
+++ b/.github/workflows/schema.yaml
@@ -0,0 +1,54 @@
+name: Schema
+
+on:
+  pull_request:
+    paths:
+      - schema/**
+      - docs/usage/configuration/config_documentation.md
+
+jobs:
+  validate-schema:
+    name: Ensure Synapse config schema is valid
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
+        with:
+          python-version: "3.x"
+      - name: Install check-jsonschema
+        run: pip install check-jsonschema==0.33.0
+
+      - name: Validate meta schema
+        run: check-jsonschema --check-metaschema schema/v*/meta.schema.json
+      - name: Validate schema
+        run: |-
+          # Please bump on introduction of a new meta schema.
+          LATEST_META_SCHEMA_VERSION=v1
+          check-jsonschema \
+            --schemafile="schema/$LATEST_META_SCHEMA_VERSION/meta.schema.json" \
+            schema/synapse-config.schema.yaml
+      - name: Validate default config
+        # Populates the empty instance with default values and checks against the schema.
+        run: |-
+          echo "{}" | check-jsonschema \
+            --fill-defaults --schemafile=schema/synapse-config.schema.yaml -
+
+  check-doc-generation:
+    name: Ensure generated documentation is up-to-date
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
+        with:
+          python-version: "3.x"
+      - name: Install PyYAML
+        run: pip install PyYAML==6.0.2
+
+      - name: Regenerate config documentation
+        run: |
+          scripts-dev/gen_config_documentation.py \
+            schema/synapse-config.schema.yaml \
+            > docs/usage/configuration/config_documentation.md
+      - name: Error in case of any differences
+        # Errors if there are now any modified files (untracked files are ignored).
+        run: 'git diff || ! git status --porcelain=1 | grep "^ M"'
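The "Validate default config" step above pipes an empty JSON object into check-jsonschema with --fill-defaults. A rough local approximation in Python, assuming the third-party jsonschema and PyYAML packages are installed; note that plain jsonschema.validate does not fill in defaults the way check-jsonschema does:

import yaml  # PyYAML
from jsonschema import validate  # third-party `jsonschema` package

with open("schema/synapse-config.schema.yaml") as f:
    schema = yaml.safe_load(f)

# Check that an empty config object satisfies the schema (defaults unfilled).
validate(instance={}, schema=schema)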
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 730f8552d9..848240f68e 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -23,7 +23,7 @@ jobs:
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
       linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
-      - uses: dorny/paths-filter@v3
+      - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
         id: filter
         # We only check on PRs
         if: startsWith(github.ref, 'refs/pull/')
@@ -83,14 +83,14 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.66.0
-      - uses: Swatinem/rust-cache@v2
-      - uses: matrix-org/setup-python-poetry@v1
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
+      - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
           python-version: "3.x"
-          poetry-version: "1.3.2"
+          poetry-version: "2.1.1"
           extras: "all"
       - run: poetry run scripts-dev/generate_sample_config.sh --check
       - run: poetry run scripts-dev/config-lint.sh
@@ -101,8 +101,8 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}
 
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: "3.x"
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
@@ -111,8 +111,8 @@ jobs:
   check-lockfile:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v5
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: "3.x"
       - run: .ci/scripts/check_lockfile.py
@@ -124,22 +124,19 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Setup Poetry
-        uses: matrix-org/setup-python-poetry@v1
+        uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
+          poetry-version: "2.1.1"
           install-project: "false"
 
-      - name: Import order (isort)
-        run: poetry run isort --check --diff .
+      - name: Run ruff check
+        run: poetry run ruff check --output-format=github .
 
-      - name: Code style (black)
-        run: poetry run black --check --diff .
-
-      - name: Semantic checks (ruff)
-        # --quiet suppresses the update check.
-        run: poetry run ruff check --quiet .
+      - name: Run ruff format
+        run: poetry run ruff format --check .
 
   lint-mypy:
     runs-on: ubuntu-latest
@@ -149,14 +146,14 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.66.0
-      - uses: Swatinem/rust-cache@v2
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
       - name: Setup Poetry
-        uses: matrix-org/setup-python-poetry@v1
+        uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
           # We want to make use of type hints in optional dependencies too.
           extras: all
@@ -165,11 +162,12 @@ jobs:
           # https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
           # To make CI green, err towards caution and install the project.
           install-project: "true"
+          poetry-version: "2.1.1"
 
       # Cribbed from
       # https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
       - name: Restore/persist mypy's cache
-        uses: actions/cache@v4
+        uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
         with:
           path: |
             .mypy_cache
@@ -182,7 +180,7 @@ jobs:
   lint-crlf:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Check line endings
         run: scripts-dev/check_line_terminators.sh
@@ -190,11 +188,11 @@ jobs:
     if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
         with:
           python-version: "3.x"
       - run: "pip install 'towncrier>=18.6.0rc1'"
@@ -208,15 +206,15 @@ jobs:
     if: ${{ needs.changes.outputs.linting == 'true' }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.66.0
-      - uses: Swatinem/rust-cache@v2
-      - uses: matrix-org/setup-python-poetry@v1
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
+      - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
         with:
-          poetry-version: "1.3.2"
+          poetry-version: "2.1.1"
           extras: "all"
       - run: poetry run scripts-dev/check_pydantic_models.py
@@ -226,13 +224,13 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@1.66.0
+        uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0
         with:
           components: clippy
-      - uses: Swatinem/rust-cache@v2
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
 
       - run: cargo clippy -- -D warnings
@@ -244,14 +242,14 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@master
+        uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
         with:
           toolchain: nightly-2022-12-01
           components: clippy
-      - uses: Swatinem/rust-cache@v2
+      - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
 
       - run: cargo clippy --all-features -- -D warnings
@@ -261,15 +259,15 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
 
       - name: Install Rust
-        uses: dtolnay/rust-toolchain@master
+        uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1)
         with:
           # We use nightly so that it correctly groups together imports
toolchain: nightly-2022-12-01 components: rustfmt - - uses: Swatinem/rust-cache@v2 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - run: cargo fmt --check @@ -280,8 +278,8 @@ jobs: needs: changes if: ${{ needs.changes.outputs.linting_readme == 'true' }} steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: "3.x" - run: "pip install rstcheck" @@ -305,7 +303,7 @@ jobs: - lint-readme runs-on: ubuntu-latest steps: - - uses: matrix-org/done-action@v3 + - uses: matrix-org/done-action@3409aa904e8a2aaf2220f09bc954d3d0b0a2ee67 # v3 with: needs: ${{ toJSON(needs) }} @@ -328,8 +326,8 @@ jobs: needs: linting-done runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: "3.x" - id: get-matrix @@ -349,7 +347,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.job.postgres-version }} if: ${{ matrix.job.postgres-version }} @@ -364,13 +362,13 @@ jobs: postgres:${{ matrix.job.postgres-version }} - name: Install Rust - uses: dtolnay/rust-toolchain@1.66.0 - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - - uses: matrix-org/setup-python-poetry@v1 + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: ${{ matrix.job.python-version }} - poetry-version: "1.3.2" + poetry-version: "2.1.1" extras: ${{ matrix.job.extras }} - name: Await PostgreSQL if: ${{ matrix.job.postgres-version }} @@ -401,24 +399,24 @@ jobs: needs: - linting-done - changes - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Rust - uses: dtolnay/rust-toolchain@1.66.0 - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 # There aren't wheels for some of the older deps, so we need to install # their build dependencies - run: | sudo apt-get -qq update - sudo apt-get -qq install build-essential libffi-dev python-dev \ + sudo apt-get -qq install build-essential libffi-dev python3-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev - - uses: actions/setup-python@v5 + - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: - python-version: '3.8' + python-version: '3.9' - name: Prepare old deps if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true' @@ -462,17 +460,17 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["pypy-3.8"] + python-version: ["pypy-3.9"] extras: ["all"] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 # Install libs necessary for PyPy to build binary wheels for dependencies - 
run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev - - uses: matrix-org/setup-python-poetry@v1 + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: ${{ matrix.python-version }} - poetry-version: "1.3.2" + poetry-version: "2.1.1" extras: ${{ matrix.extras }} - run: poetry run trial --jobs=2 tests - name: Dump logs @@ -516,13 +514,13 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Prepare test blacklist run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers - name: Install Rust - uses: dtolnay/rust-toolchain@1.66.0 - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - name: Run SyTest run: /bootstrap.sh synapse @@ -531,7 +529,7 @@ jobs: if: ${{ always() }} run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }}) @@ -561,11 +559,11 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - run: sudo apt-get -qq install xmlsec1 postgresql-client - - uses: matrix-org/setup-python-poetry@v1 + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: - poetry-version: "1.3.2" + poetry-version: "2.1.1" extras: "postgres" - run: .ci/scripts/test_export_data_command.sh env: @@ -584,11 +582,11 @@ jobs: strategy: matrix: include: - - python-version: "3.8" - postgres-version: "11" + - python-version: "3.9" + postgres-version: "13" - - python-version: "3.11" - postgres-version: "15" + - python-version: "3.13" + postgres-version: "17" services: postgres: @@ -605,7 +603,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Add PostgreSQL apt repository # We need a version of pg_dump that can handle the version of # PostgreSQL being tested against. 
The Ubuntu package repository lags @@ -616,10 +614,10 @@ jobs: wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - sudo apt-get update - run: sudo apt-get -qq install xmlsec1 postgresql-client - - uses: matrix-org/setup-python-poetry@v1 + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: ${{ matrix.python-version }} - poetry-version: "1.3.2" + poetry-version: "2.1.1" extras: "postgres" - run: .ci/scripts/test_synapse_port_db.sh id: run_tester_script @@ -629,7 +627,7 @@ jobs: PGPASSWORD: postgres PGDATABASE: postgres - name: "Upload schema differences" - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }} with: name: Schema dumps @@ -659,19 +657,19 @@ jobs: database: Postgres steps: - - name: Run actions/checkout@v4 for synapse - uses: actions/checkout@v4 + - name: Checkout synapse codebase + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: synapse - name: Install Rust - uses: dtolnay/rust-toolchain@1.66.0 - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod @@ -694,11 +692,11 @@ jobs: - changes steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Rust - uses: dtolnay/rust-toolchain@1.66.0 - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@e05ebb0e73db581a4877c6ce762e29fe1e0b5073 # 1.66.0 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - run: cargo test @@ -712,13 +710,13 @@ jobs: - changes steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Rust - uses: dtolnay/rust-toolchain@master + uses: dtolnay/rust-toolchain@56f84321dbccf38fb67ce29ab63e4754056677e0 # master (rust 1.85.1) with: toolchain: nightly-2022-12-01 - - uses: Swatinem/rust-cache@v2 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - run: cargo bench --no-run @@ -737,7 +735,7 @@ jobs: - linting-done runs-on: ubuntu-latest steps: - - uses: matrix-org/done-action@v3 + - uses: matrix-org/done-action@3409aa904e8a2aaf2220f09bc954d3d0b0a2ee67 # v3 with: needs: ${{ toJSON(needs) }} diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml
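A pattern recurs through this and the remaining workflow hunks: every third-party action is moved from a mutable reference (`@v4`, `@main`, `@stable`) to an immutable commit SHA, with the human-readable version kept as a trailing comment. A tag or branch can be re-pointed upstream after the fact, so pinning the commit protects CI against a hijacked release, while the comment preserves readability and lets tooling such as Dependabot keep updating the pin. A sketch of producing such a pin with plain git (the SHA shown is the one used for `actions/checkout` in the hunks above):

    # Resolve a release tag to the object it points at.
    git ls-remote https://github.com/actions/checkout refs/tags/v4.2.2
    # For an annotated tag, the peeled entry (the line suffixed "^{}") is the commit
    # to pin, e.g. 11bd71901bbe5b1630ceea73d27597364c9af683 for v4.2.2 per the hunks above.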
index 7a369b77fe..1d291a319b 100644 --- a/.github/workflows/triage-incoming.yml +++ b/.github/workflows/triage-incoming.yml
@@ -6,7 +6,7 @@ on: jobs: triage: - uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2 + uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@18beaf3c8e536108bd04d18e6c3dc40ba3931e28 # v2.0.3 with: project_id: 'PVT_kwDOAIB0Bs4AFDdZ' content_id: ${{ github.event.issue.node_id }} diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml
index d1ac4357b1..e506be393f 100644 --- a/.github/workflows/triage_labelled.yml +++ b/.github/workflows/triage_labelled.yml
@@ -11,7 +11,7 @@ jobs: if: > contains(github.event.issue.labels.*.name, 'X-Needs-Info') steps: - - uses: actions/add-to-project@main + - uses: actions/add-to-project@5b1a254a3546aef88e0a7724a77a623fa2e47c36 # main (v1.0.2 + 10 commits) id: add_project with: project-url: "https://github.com/orgs/matrix-org/projects/67" diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 76609c2118..5638029b39 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml
@@ -40,16 +40,17 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1) + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - - uses: matrix-org/setup-python-poetry@v1 + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: "3.x" extras: "all" + poetry-version: "2.1.1" - run: | poetry remove twisted poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }} @@ -64,17 +65,18 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - run: sudo apt-get -qq install xmlsec1 - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1) + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - - uses: matrix-org/setup-python-poetry@v1 + - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: "3.x" extras: "all test" + poetry-version: "2.1.1" - run: | poetry remove twisted poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk @@ -99,20 +101,20 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: - # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version. + # We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version. # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest # version, assuming that any incompatibilities on newer versions would also be present on the oldest. 
- image: matrixdotorg/sytest-synapse:focal + image: matrixdotorg/sytest-synapse:bullseye volumes: - ${{ github.workspace }}:/src steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 + uses: dtolnay/rust-toolchain@fcf085fcb4b4b8f63f96906cd713eb52181b5ea4 # stable (rust 1.85.1) + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8 - name: Patch dependencies # Note: The poetry commands want to create a virtualenv in /src/.venv/, @@ -136,7 +138,7 @@ jobs: if: ${{ always() }} run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 if: ${{ always() }} with: name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }}) @@ -164,14 +166,14 @@ jobs: steps: - name: Run actions/checkout@v4 for synapse - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: synapse - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@v5 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod @@ -181,11 +183,11 @@ jobs: run: | set -x DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx - pipx install poetry==1.3.2 + pipx install poetry==2.1.1 poetry remove -n twisted poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk - poetry lock --no-update + poetry lock working-directory: synapse - run: | @@ -206,7 +208,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore
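The `poetry lock --no-update` → `poetry lock` change above pairs with the `pipx install poetry==2.1.1` bump: under Poetry 1.x a bare `poetry lock` re-resolved dependencies to newer versions, so keeping pinned versions required `--no-update`, whereas Poetry 2.x preserves already-locked versions by default and drops the flag. A sketch of the equivalent invocations, assuming `pipx` is available:

    pipx install poetry==2.1.1
    # Poetry 1.x needed: poetry lock --no-update
    # Poetry 2.x keeps locked versions by default:
    poetry lock
    # To deliberately re-resolve to the newest versions the constraints allow:
    poetry lock --regenerate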
index a89f149ec1..0567934c4e 100644 --- a/.gitignore +++ b/.gitignore
@@ -30,6 +30,7 @@ __pycache__/ /*.signing.key /env/ /.venv*/ +/.venv /homeserver*.yaml /logs /media_store/ diff --git a/CHANGES.md b/CHANGES.md
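One plausible motivation for the `.gitignore` hunk above: the trailing slash makes the existing `/.venv*/` pattern match directories only, so a `.venv` that is a symlink or a plain file (as some virtualenv tooling creates) slips through; the new `/.venv` pattern covers that case. To confirm which pattern applies, a sketch run from the checkout root:

    # Print the .gitignore source, line number, and pattern matching a path.
    git check-ignore -v .venv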
index c40c11f98c..0c736bfd95 100644 --- a/CHANGES.md +++ b/CHANGES.md
@@ -1,3179 +1,606 @@ -# Synapse 1.113.0rc1 (2024-08-06) - -### Features - -- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447)) -- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477)) -- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489)) -- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505)) - -### Bugfixes - -- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450)) -- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499)) +# Synapse 1.132.0 (2025-06-17) ### Improved Documentation -- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476)) - -### Internal Changes - -- Change sliding sync to use their own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452)) -- Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478)) -- Do not send down empty room entries down experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479)) -- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482)) -- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501)) -- Split and move Sliding Sync tests so we have some more sane test file sizes. ([\#17504](https://github.com/element-hq/synapse/issues/17504)) -- Update the `limited` field description in the Sliding Sync response to accurately describe what it actually represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507)) -- Easier to understand `timeline` assertions in Sliding Sync tests. ([\#17511](https://github.com/element-hq/synapse/issues/17511)) -- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529)) - - - -### Updates to locked dependencies - -* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495)) -* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522)) -* Bump phonenumbers from 8.13.39 to 8.13.42. 
([\#17521](https://github.com/element-hq/synapse/issues/17521)) -* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494)) -* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493)) -* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525)) -* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523)) -* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496)) -* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497)) - -# Synapse 1.112.0 (2024-07-30) - -This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). - -Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0. - -This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request. -If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality. +- Improvements to generate config documentation from JSON Schema file. ([\#18522](https://github.com/element-hq/synapse/issues/18522)) -With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**. -The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration. -Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today. -**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. - -### Internal Changes - -- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502)) - -# Synapse 1.112.0rc1 (2024-07-23) - -Please note that this release candidate does not include the security dependency update -included in version 1.111.1 as this version was released before 1.111.1. -The same security fix can be found in the full release of 1.112.0. +# Synapse 1.132.0rc1 (2025-06-10) ### Features -- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
([\#17416](https://github.com/element-hq/synapse/issues/17416)) -- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418)) -- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419)) -- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429)) -- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432)) -- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433)) -- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454)) +- Add support for [MSC4155](https://github.com/matrix-org/matrix-spec-proposals/pull/4155) Invite Filtering. ([\#18288](https://github.com/element-hq/synapse/issues/18288)) +- Add experimental `user_may_send_state_event` module API callback. ([\#18455](https://github.com/element-hq/synapse/issues/18455)) +- Add experimental `get_media_config_for_user` and `is_user_allowed_to_upload_media_of_size` module API callbacks that allow overriding of media repository maximum upload size. ([\#18457](https://github.com/element-hq/synapse/issues/18457)) +- Add experimental `get_ratelimit_override_for_user` module API callback that allows overriding of per-user ratelimits. ([\#18458](https://github.com/element-hq/synapse/issues/18458)) +- Pass `room_config` argument to `user_may_create_room` spam checker module callback. ([\#18486](https://github.com/element-hq/synapse/issues/18486)) +- Support configuration of default and extra user types. ([\#18456](https://github.com/element-hq/synapse/issues/18456)) +- Successful requests to `/_matrix/app/v1/ping` will now force Synapse to reattempt delivering transactions to appservices. ([\#18521](https://github.com/element-hq/synapse/issues/18521)) +- Support the import of the `RatelimitOverride` type from `synapse.module_api` in modules and rename `messages_per_second` to `per_second`. ([\#18513](https://github.com/element-hq/synapse/issues/18513)) ### Bugfixes -- Add configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) -- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) -- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) -- Fix rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) +- Remove destinations from sending if not whitelisted. 
([\#18484](https://github.com/element-hq/synapse/issues/18484)) +- Fixed room summary API incorrectly returning that a room is private in the room summary response when the join rule is omitted by the remote server. Contributed by @nexy7574. ([\#18493](https://github.com/element-hq/synapse/issues/18493)) +- Prevent users from adding themselves to their own user ignore list. ([\#18508](https://github.com/element-hq/synapse/issues/18508)) ### Improved Documentation -- Update the readme image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387)) -- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423)) -- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451)) - -### Internal Changes - -- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) -- Fix argument documentation for method `RateLimiter.record_action`. ([\#17426](https://github.com/element-hq/synapse/issues/17426)) -- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) -- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) -- Remove unnecessary call to resume producing in fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) -- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) -- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) -- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460)) -- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461)) -- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468)) -- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469)) - - - -### Updates to locked dependencies - -* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441)) -* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464)) -* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444)) -* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440)) -* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445)) -* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465)) -* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466)) -* Bump sentry-sdk from 2.6.0 to 2.8.0. 
([\#17456](https://github.com/element-hq/synapse/issues/17456)) -* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467)) -* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448)) -* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443)) -* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446)) -* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442)) -* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427)) - - -# Synapse 1.111.1 (2024-07-30) - -This security release is to update our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). - -This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request. -If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality. - -With that said, despite being a high severity issue, **we consider it unlikely that Synapse installations will be affected**. -The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and Haproxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration. - -Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today. - -**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. - +- Generate config documentation from JSON Schema file. ([\#17892](https://github.com/element-hq/synapse/issues/17892)) +- Mention `CAP_NET_BIND_SERVICE` as an alternative to running Synapse as root in order to bind to a privileged port. ([\#18408](https://github.com/element-hq/synapse/issues/18408)) +- Surface hidden Admin API documentation regarding fetching of scheduled tasks. ([\#18516](https://github.com/element-hq/synapse/issues/18516)) +- Mark the new module APIs in this release as experimental. ([\#18536](https://github.com/element-hq/synapse/issues/18536)) ### Internal Changes -- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502)) - - -# Synapse 1.111.0 (2024-07-16) - -No significant changes since 1.111.0rc2. +- Mark dehydrated devices in the [List All User Devices Admin API](https://element-hq.github.io/synapse/latest/admin_api/user_admin_api.html#list-all-devices). 
([\#18252](https://github.com/element-hq/synapse/issues/18252)) +- Reduce disk wastage by cleaning up `received_transactions` older than 1 day, rather than 30 days. ([\#18310](https://github.com/element-hq/synapse/issues/18310)) +- Distinguish all vs local events being persisted in the "Event Send Time Quantiles" graph (Grafana). ([\#18510](https://github.com/element-hq/synapse/issues/18510)) -# Synapse 1.111.0rc2 (2024-07-10) +# Synapse 1.131.0 (2025-06-03) -### Bugfixes - -- Fix bug where using `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420)) - -### Improved Documentation - -- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421)) +No significant changes since 1.131.0rc1. -### Internal Changes - -- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422)) - - - - -# Synapse 1.111.0rc1 (2024-07-09) +# Synapse 1.131.0rc1 (2025-05-28) ### Features -- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320)) -- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337)) -- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342)) -- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint. ([\#17365](https://github.com/element-hq/synapse/issues/17365)) -- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) - by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the - remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388)) -- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395)) -- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400)) -- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). 
([\#17403](https://github.com/element-hq/synapse/issues/17403)) -- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406)) +- Add `msc4263_limit_key_queries_to_users_who_share_rooms` config option as per [MSC4263](https://github.com/matrix-org/matrix-spec-proposals/pull/4263). ([\#18180](https://github.com/element-hq/synapse/issues/18180)) +- Add option to allow registrations that begin with `_`. Contributed by `_` (@hex5f). ([\#18262](https://github.com/element-hq/synapse/issues/18262)) +- Include room ID in response to the [Room Deletion Status Admin API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#status-of-deleting-rooms). ([\#18318](https://github.com/element-hq/synapse/issues/18318)) +- Add support for calling Policy Servers ([MSC4284](https://github.com/matrix-org/matrix-spec-proposals/pull/4284)) to mark events as spam. ([\#18387](https://github.com/element-hq/synapse/issues/18387)) ### Bugfixes -- Fix rare race which caused no new to-device messages to be received from remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362)) -- Fix bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398)) +- Prevent race-condition in `_maybe_retry_device_resync` entrance. ([\#18391](https://github.com/element-hq/synapse/issues/18391)) +- Fix the `tests.handlers.test_worker_lock.WorkerLockTestCase.test_lock_contention` test which could spuriously time out on RISC-V architectures due to performance differences. ([\#18430](https://github.com/element-hq/synapse/issues/18430)) +- Fix admin redaction endpoint not redacting encrypted messages. ([\#18434](https://github.com/element-hq/synapse/issues/18434)) ### Improved Documentation -- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356)) -- Fix broken links in README. ([\#17379](https://github.com/element-hq/synapse/issues/17379)) -- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399)) +- Update `room_list_publication_rules` docs to consider defaults that changed in v1.126.0. Contributed by @HarHarLinks. ([\#18286](https://github.com/element-hq/synapse/issues/18286)) +- Add advice for upgrading between major PostgreSQL versions to the database documentation. ([\#18445](https://github.com/element-hq/synapse/issues/18445)) ### Internal Changes -- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318)) -- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363)) -- Add CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367)) -- Fix linting errors from new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411)) -- Fix building debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390)) -- Finish up work to allow per-user feature flags. 
([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410)) -- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393)) +- Fix a memory leak in `_NotifierUserStream`. ([\#18380](https://github.com/element-hq/synapse/issues/18380)) +- Fix a couple type annotations in the `RootConfig`/`Config`. ([\#18409](https://github.com/element-hq/synapse/issues/18409)) +- Explicitly enable PyPy builds in `cibuildwheel`s config to avoid it being disabled on a future upgrade to `cibuildwheel` v3. ([\#18417](https://github.com/element-hq/synapse/issues/18417)) +- Update the PR review template to remove an erroneous line break from the final bullet point. ([\#18419](https://github.com/element-hq/synapse/issues/18419)) +- Explain why we `flush_buffer()` for Python `print(...)` output. ([\#18420](https://github.com/element-hq/synapse/issues/18420)) +- Add lint to ensure we don't add a `CREATE/DROP INDEX` in a schema delta. ([\#18440](https://github.com/element-hq/synapse/issues/18440)) +- Allow checking only for the existence of a field in an SSO provider's response, rather than requiring the value(s) to check. ([\#18454](https://github.com/element-hq/synapse/issues/18454)) +- Add unit tests for homeserver usage statistics. ([\#18463](https://github.com/element-hq/synapse/issues/18463)) +- Don't move invited users to new room when shutting down room. ([\#18471](https://github.com/element-hq/synapse/issues/18471)) ### Updates to locked dependencies -* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404)) -* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382)) -* Bump ijson from 3.2.3 to 3.3.0. ([\#17413](https://github.com/element-hq/synapse/issues/17413)) -* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384)) -* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414)) -* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412)) -* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415)) -* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) -* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409)) -* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408)) -* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380)) - -# Synapse 1.110.0 (2024-07-03) +* Bump actions/setup-python from 5.5.0 to 5.6.0. ([\#18398](https://github.com/element-hq/synapse/issues/18398)) +* Bump authlib from 1.5.1 to 1.5.2. ([\#18452](https://github.com/element-hq/synapse/issues/18452)) +* Bump docker/build-push-action from 6.15.0 to 6.17.0. ([\#18397](https://github.com/element-hq/synapse/issues/18397), [\#18449](https://github.com/element-hq/synapse/issues/18449)) +* Bump lxml from 5.3.0 to 5.4.0. ([\#18480](https://github.com/element-hq/synapse/issues/18480)) +* Bump mypy-zope from 1.0.9 to 1.0.11. ([\#18428](https://github.com/element-hq/synapse/issues/18428)) +* Bump pyo3 from 0.23.5 to 0.24.2. 
([\#18460](https://github.com/element-hq/synapse/issues/18460)) +* Bump pyo3-log from 0.12.3 to 0.12.4. ([\#18453](https://github.com/element-hq/synapse/issues/18453)) +* Bump pyopenssl from 25.0.0 to 25.1.0. ([\#18450](https://github.com/element-hq/synapse/issues/18450)) +* Bump ruff from 0.7.3 to 0.11.11. ([\#18451](https://github.com/element-hq/synapse/issues/18451), [\#18482](https://github.com/element-hq/synapse/issues/18482)) +* Bump tornado from 6.4.2 to 6.5.0. ([\#18459](https://github.com/element-hq/synapse/issues/18459)) +* Bump setuptools from 72.1.0 to 78.1.1. ([\#18461](https://github.com/element-hq/synapse/issues/18461)) +* Bump types-jsonschema from 4.23.0.20241208 to 4.23.0.20250516. ([\#18481](https://github.com/element-hq/synapse/issues/18481)) +* Bump types-requests from 2.32.0.20241016 to 2.32.0.20250328. ([\#18427](https://github.com/element-hq/synapse/issues/18427)) -No significant changes since 1.110.0rc3. - - - - -# Synapse 1.110.0rc3 (2024-07-02) +# Synapse 1.130.0 (2025-05-20) ### Bugfixes -- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391)) - -### Internal Changes - -- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371)) -- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389)) - - - - -# Synapse 1.110.0rc2 (2024-06-26) - -### Internal Changes - -- Fix uploading packages to PyPi. ([\#17363](https://github.com/element-hq/synapse/issues/17363)) +- Fix startup being blocked on creating a new index that was introduced in v1.130.0rc1. ([\#18439](https://github.com/element-hq/synapse/issues/18439)) +- Fix the ordering of local messages in rooms that were affected by [GHSA-v56r-hwv5-mxg6](https://github.com/advisories/GHSA-v56r-hwv5-mxg6). ([\#18447](https://github.com/element-hq/synapse/issues/18447)) -# Synapse 1.110.0rc1 (2024-06-26) +# Synapse 1.130.0rc1 (2025-05-13) ### Features -- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187)) -- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255)) -- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256)) -- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296)) -- Filter for public and empty rooms added to Admin-API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276)) -- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277)) -- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. 
([\#17281](https://github.com/element-hq/synapse/issues/17281)) -- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17282](https://github.com/element-hq/synapse/issues/17282)) -- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284)) -- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293)) -- `register_new_matrix_user` now supports a --password-file flag, which - is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294)) -- `register_new_matrix_user` now supports a --exists-ok flag to allow registration of users that already exist in the database. - This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304)) -- Add support for via query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322)) -- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335)) -- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350)) - -### Bugfixes - -- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254)) -- Fix wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272)) -- Fix bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275)) -- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283)) -- Fix edge case in `/sync` returning the wrong the state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295)) -- Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301)) -- Fix email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336)) - -### Improved Documentation - -- Add missing quotes for example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308)) -- Update header in the README to visually fix the the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329)) -- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341)) -- Add default values for `rc_invites.per_issuer` to docs. 
([\#17347](https://github.com/element-hq/synapse/issues/17347)) -- Fix an error in the docs for `search_all_users` parameter under `user_directory`. ([\#17348](https://github.com/element-hq/synapse/issues/17348)) - -### Internal Changes - -- Remove unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198)) -- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265)) -- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266)) -- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271)) -- Don't try and resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273)) -- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279)) -- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300)) -- Update the README with Element branding, improve headers and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324)) -- Change path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331)) -- Handle device lists notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358)) -- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338)) -- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring `parse_integer_from_args` default in alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339)) - - - -### Updates to locked dependencies - -* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343)) -* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289)) -* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313)) -* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312)) -* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287)) -* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355)) -* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317)) -* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353)) -* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352)) -* Bump phonenumbers from 8.13.37 to 8.13.39. 
([\#17315](https://github.com/element-hq/synapse/issues/17315)) -* Bump regex from 1.10.4 to 1.10.5. ([\#17290](https://github.com/element-hq/synapse/issues/17290)) -* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345)) -* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263)) -* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351)) -* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344)) -* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297)) -* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288)) -* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314)) -* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285)) -* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316)) -* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354)) -* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346)) - -# Synapse 1.109.0 (2024-06-18) - -### Internal Changes - -- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319)) - - - - -# Synapse 1.109.0rc3 (2024-06-17) - -### Bugfixes - -- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309)) - -### Internal Changes - -- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306)) - - - - -# Synapse 1.109.0rc2 (2024-06-11) +- Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. ([\#18214](https://github.com/element-hq/synapse/issues/18214)) +- Add config option `user_directory.exclude_remote_users` which, when enabled, excludes remote users from user directory search results. ([\#18300](https://github.com/element-hq/synapse/issues/18300)) +- Add support for handling `GET /devices/` on workers. ([\#18355](https://github.com/element-hq/synapse/issues/18355)) ### Bugfixes -- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275)) -- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292)) - - +- Fix a longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363)) +- Pass leave from remote invite rejection down Sliding Sync. ([\#18375](https://github.com/element-hq/synapse/issues/18375)) +### Updates to the Docker image -# Synapse 1.109.0rc1 (2024-06-04) - -### Features - -- Add the ability to auto-accept invites on the behalf of users. 
### Bugfixes

-- Fix a bug where one-time keys were not always included in the `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
-- Fix a bug where `/sync` could get stuck due to an edge case in device list handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))
-
+- Fix a longstanding bug where Synapse would immediately retry a failing push endpoint when a new event is received, ignoring any backoff timers. ([\#18363](https://github.com/element-hq/synapse/issues/18363))
+- Pass the leave event from a remote invite rejection down Sliding Sync. ([\#18375](https://github.com/element-hq/synapse/issues/18375))

+### Updates to the Docker image

-# Synapse 1.109.0rc1 (2024-06-04)
-
-### Features
-
-- Add the ability to auto-accept invites on behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147))
-- Add an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167))
-- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213))
-- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219))

### Bugfixes

-- Fix deduplication of membership events so that it does not create unused state groups. ([\#17164](https://github.com/element-hq/synapse/issues/17164))
-- Fix a bug where duplicate events could be sent down sync when workers are overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215))
-- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240))
-- Fix handling of duplicate concurrent uploads of device one-time keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241))
-- Fix reporting of default tags to Sentry, such as the worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251))
-- Fix a bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252))
+- In `configure_workers_and_start.py`, use the same absolute path of Python in the interpreter shebang, and invoke child Python processes with `sys.executable`. ([\#18291](https://github.com/element-hq/synapse/issues/18291))
+- Optimize the build of the workers image. ([\#18292](https://github.com/element-hq/synapse/issues/18292))
+- In `start_for_complement.sh`, replace some external program calls with shell builtins. ([\#18293](https://github.com/element-hq/synapse/issues/18293))
+- When generating container scripts from templates, don't add a leading newline so that their shebangs may be handled correctly. ([\#18295](https://github.com/element-hq/synapse/issues/18295))

### Improved Documentation

-- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204))
+- Improve formatting of the README file. ([\#18218](https://github.com/element-hq/synapse/issues/18218))
+- Add documentation for configuring [Pocket ID](https://github.com/pocket-id/pocket-id) as an OIDC provider. ([\#18237](https://github.com/element-hq/synapse/issues/18237))
+- Fix a typo in the docs about the `push` config option. Contributed by @HarHarLinks. ([\#18320](https://github.com/element-hq/synapse/issues/18320))
+- Add `/_matrix/federation/v1/version` to the list of federation endpoints that can be handled by workers (see the sketch below). ([\#18377](https://github.com/element-hq/synapse/issues/18377))
+- Add an Admin API endpoint `GET /_synapse/admin/v1/scheduled_tasks` to fetch scheduled tasks. ([\#18384](https://github.com/element-hq/synapse/issues/18384))
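For the federation `/version` endpoint mentioned in the documentation entries above, a minimal unauthenticated probe looks roughly like this — the hostname and federation port are placeholders:

```python
import requests

# /_matrix/federation/v1/version is served on the federation listener
# and does not require request signing.
resp = requests.get(
    "https://synapse.example.com:8448/_matrix/federation/v1/version",
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # e.g. {"server": {"name": "Synapse", "version": "..."}}
```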
### Internal Changes

-- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083))
-- Log exceptions when failing to auto-join a new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176))
-- Reduce the work of calculating outbound device list updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211))
-- Improve performance of calculating device list changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216))
-- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226))
-- Replace all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229))
-- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238))
-- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246))
-- Clean out invalid destinations from the `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242))
-- Stop logging errors when receiving invalid user IDs in key query requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250))
+- Return a specific error code when adding an email address / phone number to an account is not supported ([MSC4178](https://github.com/matrix-org/matrix-spec-proposals/pull/4178)). ([\#17578](https://github.com/element-hq/synapse/issues/17578))
+- Stop auto-provisioning missing users & devices when delegating auth to Matrix Authentication Service. Requires MAS 0.13.0 or later. ([\#18181](https://github.com/element-hq/synapse/issues/18181))
+- Apply file hashing and existing quarantines to media downloaded for URL previews. ([\#18297](https://github.com/element-hq/synapse/issues/18297))
+- Allow a few admin APIs used by matrix-authentication-service to run on workers. ([\#18313](https://github.com/element-hq/synapse/issues/18313))
+- Apply `should_drop_federated_event` to federation invites. ([\#18330](https://github.com/element-hq/synapse/issues/18330))
+- Allow the `/rooms/` admin API to be run on workers. ([\#18360](https://github.com/element-hq/synapse/issues/18360))
+- Minor performance improvements to the notifier. ([\#18367](https://github.com/element-hq/synapse/issues/18367))
+- Slight performance increase when using the ratelimiter. ([\#18369](https://github.com/element-hq/synapse/issues/18369))
+- Don't validate the `at_hash` (access token hash) field in OIDC ID Tokens if we don't end up actually using the OIDC access token. ([\#18374](https://github.com/element-hq/synapse/issues/18374), [\#18385](https://github.com/element-hq/synapse/issues/18385))
+- Fix test failures when using authlib 1.5.2. ([\#18390](https://github.com/element-hq/synapse/issues/18390))
+- Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Simplified Sliding Sync room list tests to cover both the new and fallback logic paths. ([\#18399](https://github.com/element-hq/synapse/issues/18399))

### Updates to locked dependencies

-* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220))
-* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224))
-* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261))
-* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262))
-* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235))
-* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233))
-* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223))
-* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236))
-* Bump pyopenssl from 24.0.0 to 24.1.0. ([\#17234](https://github.com/element-hq/synapse/issues/17234))
-* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221))
-* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232))
-* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225))
-* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222))
-* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260))
-
-# Synapse 1.108.0 (2024-05-28)
-
-No significant changes since 1.108.0rc1.
-
-
+* Bump actions/add-to-project from 280af8ae1f83a494cfad2cb10f02f6d13529caa9 to 5b1a254a3546aef88e0a7724a77a623fa2e47c36. ([\#18365](https://github.com/element-hq/synapse/issues/18365))
+* Bump actions/download-artifact from 4.2.1 to 4.3.0. ([\#18364](https://github.com/element-hq/synapse/issues/18364))
+* Bump actions/setup-go from 5.4.0 to 5.5.0. ([\#18426](https://github.com/element-hq/synapse/issues/18426))
+* Bump anyhow from 1.0.97 to 1.0.98. ([\#18336](https://github.com/element-hq/synapse/issues/18336))
+* Bump packaging from 24.2 to 25.0. ([\#18393](https://github.com/element-hq/synapse/issues/18393))
+* Bump pillow from 11.1.0 to 11.2.1. ([\#18429](https://github.com/element-hq/synapse/issues/18429))
+* Bump pydantic from 2.10.3 to 2.11.4. ([\#18394](https://github.com/element-hq/synapse/issues/18394))
+* Bump pyo3-log from 0.12.2 to 0.12.3. ([\#18317](https://github.com/element-hq/synapse/issues/18317))
+* Bump pyopenssl from 24.3.0 to 25.0.0. ([\#18315](https://github.com/element-hq/synapse/issues/18315))
+* Bump sha2 from 0.10.8 to 0.10.9. ([\#18395](https://github.com/element-hq/synapse/issues/18395))
+* Bump sigstore/cosign-installer from 3.8.1 to 3.8.2. ([\#18366](https://github.com/element-hq/synapse/issues/18366))
+* Bump softprops/action-gh-release from 1 to 2. ([\#18264](https://github.com/element-hq/synapse/issues/18264))
+* Bump stefanzweifel/git-auto-commit-action from 5.1.0 to 5.2.0. ([\#18354](https://github.com/element-hq/synapse/issues/18354))
+* Bump txredisapi from 1.4.10 to 1.4.11. ([\#18392](https://github.com/element-hq/synapse/issues/18392))
+* Bump types-jsonschema from 4.23.0.20240813 to 4.23.0.20241208. ([\#18305](https://github.com/element-hq/synapse/issues/18305))
+* Bump types-psycopg2 from 2.9.21.20250121 to 2.9.21.20250318. ([\#18316](https://github.com/element-hq/synapse/issues/18316))

-# Synapse 1.108.0rc1 (2024-05-21)
+# Synapse 1.129.0 (2025-05-06)

-### Features
+No significant changes since 1.129.0rc2.
-- Add a feature that allows clients to query the configured federation whitelist. Disabled by default. ([\#16848](https://github.com/element-hq/synapse/issues/16848), [\#17199](https://github.com/element-hq/synapse/issues/17199))
-- Add the ability to allow numeric user IDs with a specific prefix when in the CAS flow. Contributed by Aurélien Grimpard. ([\#17098](https://github.com/element-hq/synapse/issues/17098))

-### Bugfixes
-
-- Fix a bug where push rules would be empty in `/sync` for some accounts. Introduced in v1.93.0. ([\#17142](https://github.com/element-hq/synapse/issues/17142))
-- Add support for optional whitespace around the Federation API's `Authorization` header's parameter commas. ([\#17145](https://github.com/element-hq/synapse/issues/17145))
-- Fix a bug where disabling room publication prevented public rooms from being created on workers. ([\#17177](https://github.com/element-hq/synapse/issues/17177), [\#17184](https://github.com/element-hq/synapse/issues/17184))

-### Improved Documentation
+# Synapse 1.129.0rc2 (2025-04-30)
-
-- Document the [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/`](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible. ([\#17058](https://github.com/element-hq/synapse/issues/17058))
-- Update the User Admin API with a note about prefixing OIDC external_id providers. ([\#17139](https://github.com/element-hq/synapse/issues/17139))
-- Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option. ([\#17150](https://github.com/element-hq/synapse/issues/17150))
-- Update the Admin FAQ with the current libjemalloc version for the latest Debian stable. Additionally, update the name of the "push_rules" stream in the Workers documentation. ([\#17171](https://github.com/element-hq/synapse/issues/17171))
+Synapse 1.129.0rc1 was never formally released due to regressions discovered during the release process. 1.129.0rc2 fixes those regressions by reverting the affected PRs.

### Internal Changes

-- Add a note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will remain supported for some time. ([\#17151](https://github.com/element-hq/synapse/issues/17151))
-- Update the PyO3 dependency to 0.21. ([\#17162](https://github.com/element-hq/synapse/issues/17162))
-- Fix linter errors found in PR #17147. ([\#17166](https://github.com/element-hq/synapse/issues/17166))
-- Bump black from 24.2.0 to 24.4.2. ([\#17170](https://github.com/element-hq/synapse/issues/17170))
-- Cache literal sync filter validation for performance. ([\#17186](https://github.com/element-hq/synapse/issues/17186))
-- Improve performance by fixing a reactor pause. ([\#17192](https://github.com/element-hq/synapse/issues/17192))
-- Route `/make_knock` and `/send_knock` federation APIs to the federation reader worker in Complement test runs. ([\#17195](https://github.com/element-hq/synapse/issues/17195))
-- Prepare the sync handler to be able to return different sync responses (`SyncVersion`). ([\#17200](https://github.com/element-hq/synapse/issues/17200))
-- Organize the sync cache key parameter outside of the sync config (separate concerns). ([\#17201](https://github.com/element-hq/synapse/issues/17201))
-- Refactor `SyncResultBuilder` assembly into its own function. ([\#17202](https://github.com/element-hq/synapse/issues/17202))
-- Rename to be obvious: `joined_rooms` -> `joined_room_ids`. ([\#17203](https://github.com/element-hq/synapse/issues/17203), [\#17208](https://github.com/element-hq/synapse/issues/17208))
-- Add a short pause when rate-limiting a request. ([\#17210](https://github.com/element-hq/synapse/issues/17210))
-
-
-### Updates to locked dependencies
-
-* Bump cryptography from 42.0.5 to 42.0.7. ([\#17180](https://github.com/element-hq/synapse/issues/17180))
-* Bump gitpython from 3.1.41 to 3.1.43. ([\#17181](https://github.com/element-hq/synapse/issues/17181))
-* Bump immutabledict from 4.1.0 to 4.2.0. ([\#17179](https://github.com/element-hq/synapse/issues/17179))
-* Bump sentry-sdk from 1.40.3 to 2.1.1. ([\#17178](https://github.com/element-hq/synapse/issues/17178))
-* Bump serde from 1.0.200 to 1.0.201. ([\#17183](https://github.com/element-hq/synapse/issues/17183))
-* Bump serde_json from 1.0.116 to 1.0.117. ([\#17182](https://github.com/element-hq/synapse/issues/17182))
+- Revert the slow background update introduced by [\#18068](https://github.com/element-hq/synapse/issues/18068) in v1.128.0. ([\#18372](https://github.com/element-hq/synapse/issues/18372))
+- Revert "Add total event, unencrypted message, and e2ee event counts to stats reporting", added in v1.129.0rc1. ([\#18373](https://github.com/element-hq/synapse/issues/18373))

-# Synapse 1.107.0 (2024-05-14)
-
-No significant changes since 1.107.0rc1.

-# Synapse 1.107.0rc1 (2024-05-07)
+# Synapse 1.129.0rc1 (2025-04-15)

### Features

-- Add preliminary support for [MSC3823: Account Suspension](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17051](https://github.com/element-hq/synapse/issues/17051))
-- Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep. ([\#17082](https://github.com/element-hq/synapse/issues/17082))
-- Add support for [MSC4115: membership metadata on events](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137))
+- Add `passthrough_authorization_parameters` in OIDC configuration to allow passing parameters to the authorization grant URL. ([\#18232](https://github.com/element-hq/synapse/issues/18232))
+- Add `total_event_count`, `total_message_count`, and `total_e2ee_event_count` fields to the homeserver usage statistics. ([\#18260](https://github.com/element-hq/synapse/issues/18260))

### Bugfixes

-- Fix the search feature of Element Android on homeservers using SQLite by returning search terms as search highlights. ([\#17000](https://github.com/element-hq/synapse/issues/17000))
-- Fix a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms. ([\#17077](https://github.com/element-hq/synapse/issues/17077))
-- For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated (see the sketch after this list). ([\#17078](https://github.com/element-hq/synapse/issues/17078))
-- Apply the user's email & picture during OIDC registration if present & selected. ([\#17120](https://github.com/element-hq/synapse/issues/17120))
-- Improve the error message for a cross-signing reset with [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121))
-- Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load or networking problems caused problems between Synapse processes or the database. ([\#17127](https://github.com/element-hq/synapse/issues/17127))
-- Fix a bug where `StreamChangeCache` would not respect configured cache factors. ([\#17152](https://github.com/element-hq/synapse/issues/17152))
+- Fix `force_tracing_for_users` config when using delegated auth. ([\#18334](https://github.com/element-hq/synapse/issues/18334))
+- Fix the token introspection cache logging access tokens when MAS integration is in use. ([\#18335](https://github.com/element-hq/synapse/issues/18335))
+- Stop caching introspection failures when delegating auth to MAS. ([\#18339](https://github.com/element-hq/synapse/issues/18339))
+- Fix an `ExternalIDReuse` exception after migrating to MAS on workers with high traffic. ([\#18342](https://github.com/element-hq/synapse/issues/18342))
+- Fix a minor performance regression caused by tracking of room participation. Regressed in v1.128.0. ([\#18345](https://github.com/element-hq/synapse/issues/18345))
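A hedged sketch of querying the recommended MSC3266 summary endpoint noted in the 1.107.0rc1 bugfix list above. The homeserver, token, and room alias are placeholders, and the unstable prefix may change as the MSC evolves:

```python
import requests
from urllib.parse import quote

BASE_URL = "https://synapse.example.com"  # placeholder homeserver
TOKEN = "<access token>"                  # placeholder credential
ROOM = "#somewhere:example.com"           # room ID or alias

# Recommended (non-deprecated) MSC3266 path: .../summary/{roomIdOrAlias}
resp = requests.get(
    f"{BASE_URL}/_matrix/client/unstable/im.nheko.summary/summary/"
    f"{quote(ROOM, safe='')}",
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())  # room summary, e.g. name, topic, member count
```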
### Updates to the Docker image

-- Correct licensing metadata on the Docker image. ([\#17141](https://github.com/element-hq/synapse/issues/17141))

### Improved Documentation

-- Update the `event_cache_size` and `global_factor` configuration options' documentation. ([\#17071](https://github.com/element-hq/synapse/issues/17071))
-- Remove broken sphinx docs. ([\#17073](https://github.com/element-hq/synapse/issues/17073), [\#17148](https://github.com/element-hq/synapse/issues/17148))
-- Add `RuntimeDirectory` to the example matrix-synapse.service systemd unit. ([\#17084](https://github.com/element-hq/synapse/issues/17084))
-- Fix various small typos throughout the docs. ([\#17114](https://github.com/element-hq/synapse/issues/17114))
-- Update the `enable_notifs` configuration documentation. ([\#17116](https://github.com/element-hq/synapse/issues/17116))
-- Update the Upgrade Notes with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140))
+- Optimize the build of the complement-synapse image. ([\#18294](https://github.com/element-hq/synapse/issues/18294))

### Internal Changes

-- Enable [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105))
-- Add an optimisation to `StreamChangeCache.get_entities_changed(..)`. ([\#17130](https://github.com/element-hq/synapse/issues/17130))
-
+- Disable statement timeout during room purge. ([\#18133](https://github.com/element-hq/synapse/issues/18133))
+- Add a cache to storage functions used to auth requests when using delegated auth. ([\#18337](https://github.com/element-hq/synapse/issues/18337))

-### Updates to locked dependencies
-
-* Bump furo from 2024.1.29 to 2024.4.27. ([\#17133](https://github.com/element-hq/synapse/issues/17133))
-* Bump idna from 3.6 to 3.7. ([\#17136](https://github.com/element-hq/synapse/issues/17136))
-* Bump jsonschema from 4.21.1 to 4.22.0. ([\#17157](https://github.com/element-hq/synapse/issues/17157))
-* Bump lxml from 5.1.0 to 5.2.1. ([\#17158](https://github.com/element-hq/synapse/issues/17158))
-* Bump phonenumbers from 8.13.29 to 8.13.35. ([\#17106](https://github.com/element-hq/synapse/issues/17106))
-* Bump pillow from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146))
-* Bump pydantic from 2.6.4 to 2.7.0. ([\#17107](https://github.com/element-hq/synapse/issues/17107))
-* Bump pydantic from 2.7.0 to 2.7.1. ([\#17160](https://github.com/element-hq/synapse/issues/17160))
-* Bump pyicu from 2.12 to 2.13. ([\#17109](https://github.com/element-hq/synapse/issues/17109))
-* Bump serde from 1.0.197 to 1.0.198. ([\#17111](https://github.com/element-hq/synapse/issues/17111))
-* Bump serde from 1.0.198 to 1.0.199. ([\#17132](https://github.com/element-hq/synapse/issues/17132))
-* Bump serde from 1.0.199 to 1.0.200. ([\#17161](https://github.com/element-hq/synapse/issues/17161))
-* Bump serde_json from 1.0.115 to 1.0.116. ([\#17112](https://github.com/element-hq/synapse/issues/17112))
-* Update the `tornado` Python dependency from 6.2 to 6.4. ([\#17131](https://github.com/element-hq/synapse/issues/17131))
-* Bump twisted from 23.10.0 to 24.3.0. ([\#17135](https://github.com/element-hq/synapse/issues/17135))
-* Bump types-bleach from 6.1.0.1 to 6.1.0.20240331. ([\#17110](https://github.com/element-hq/synapse/issues/17110))
-* Bump types-pillow from 10.2.0.20240415 to 10.2.0.20240423. ([\#17159](https://github.com/element-hq/synapse/issues/17159))
-* Bump types-setuptools from 69.0.0.20240125 to 69.5.0.20240423. ([\#17134](https://github.com/element-hq/synapse/issues/17134))

-# Synapse 1.106.0 (2024-04-30)
-
-No significant changes since 1.106.0rc1.

-# Synapse 1.106.0rc1 (2024-04-25)

### Features

-- Send an email if the address is already bound to a user account. ([\#16819](https://github.com/element-hq/synapse/issues/16819))
-- Implement the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108). ([\#17056](https://github.com/element-hq/synapse/issues/17056))
-- Support delegating the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108) to an external implementation. ([\#17086](https://github.com/element-hq/synapse/issues/17086))

### Bugfixes

-- Add validation to ensure that the `limit` parameter on `/publicRooms` is non-negative (see the sketch after this list). ([\#16920](https://github.com/element-hq/synapse/issues/16920))
-- Return `400 M_NOT_JSON` upon receiving invalid JSON in query parameters across various client and admin endpoints, rather than an internal server error. ([\#16923](https://github.com/element-hq/synapse/issues/16923))
-- Make the CSAPI endpoint `/keys/device_signing/upload` idempotent. ([\#16943](https://github.com/element-hq/synapse/issues/16943))
-- Redact membership events if the user requested erasure upon deactivating. ([\#17076](https://github.com/element-hq/synapse/issues/17076))
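As a hedged illustration of the `/publicRooms` entry above: a well-formed call passes a non-negative `limit`, and after this fix Synapse is expected to reject negative values with a 400 error rather than accept them. The homeserver URL is a placeholder:

```python
import requests

BASE_URL = "https://synapse.example.com"  # placeholder homeserver

# `limit` must be a non-negative integer.
resp = requests.get(
    f"{BASE_URL}/_matrix/client/v3/publicRooms",
    params={"limit": 10},
    timeout=10,
)
resp.raise_for_status()
for room in resp.json().get("chunk", []):
    print(room.get("room_id"), room.get("name"))
```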
-### Improved Documentation
-
-- Add a prompt in the contributing guide to manually configure icu4c. ([\#17069](https://github.com/element-hq/synapse/issues/17069))
-- Clarify what part of message retention is still experimental. ([\#17099](https://github.com/element-hq/synapse/issues/17099))

-### Internal Changes
-
-- Use the new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar). ([\#17032](https://github.com/element-hq/synapse/issues/17032), [\#17096](https://github.com/element-hq/synapse/issues/17096))
-- Fix mypy with the latest Twisted release. ([\#17036](https://github.com/element-hq/synapse/issues/17036))
-- Bump the minimum supported Rust version to 1.66.0. ([\#17079](https://github.com/element-hq/synapse/issues/17079))
-- Add helpers to transform Twisted requests to Rust http Requests/Responses. ([\#17081](https://github.com/element-hq/synapse/issues/17081))
-- Fix the type annotation for `visited_chains` after the `mypy` upgrade. ([\#17125](https://github.com/element-hq/synapse/issues/17125))
-
-### Updates to locked dependencies
-
-* Bump anyhow from 1.0.81 to 1.0.82. ([\#17095](https://github.com/element-hq/synapse/issues/17095))
-* Bump peaceiris/actions-gh-pages from 3.9.3 to 4.0.0. ([\#17087](https://github.com/element-hq/synapse/issues/17087))
-* Bump peaceiris/actions-mdbook from 1.2.0 to 2.0.0. ([\#17089](https://github.com/element-hq/synapse/issues/17089))
-* Bump pyasn1-modules from 0.3.0 to 0.4.0. ([\#17093](https://github.com/element-hq/synapse/issues/17093))
-* Bump pygithub from 2.2.0 to 2.3.0. ([\#17092](https://github.com/element-hq/synapse/issues/17092))
-* Bump ruff from 0.3.5 to 0.3.7. ([\#17094](https://github.com/element-hq/synapse/issues/17094))
-* Bump sigstore/cosign-installer from 3.4.0 to 3.5.0. ([\#17088](https://github.com/element-hq/synapse/issues/17088))
-* Bump twine from 4.0.2 to 5.0.0. ([\#17091](https://github.com/element-hq/synapse/issues/17091))
-* Bump types-pillow from 10.2.0.20240406 to 10.2.0.20240415. ([\#17090](https://github.com/element-hq/synapse/issues/17090))

-# Synapse 1.105.1 (2024-04-23)
-
-## Security advisory
-
-The following issues are fixed in 1.105.1.
-
-- [GHSA-3h7q-rfh9-xm4v](https://github.com/element-hq/synapse/security/advisories/GHSA-3h7q-rfh9-xm4v) / [CVE-2024-31208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-31208) — High Severity
-
-  Weakness in auth chain indexing allows DoS from remote room members through disk fill and high CPU usage.
-
-See the advisories for more details. If you have any questions, email security@element.io.

-# Synapse 1.105.0 (2024-04-16)
-
-No significant changes since 1.105.0rc1.

-# Synapse 1.105.0rc1 (2024-04-11)
-
-### Features
-
-- Stabilize support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010), which clarifies the interaction of push rules and account data. Contributed by @clokep. ([\#17022](https://github.com/element-hq/synapse/issues/17022))
-- Stabilize support for [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981): `/relations` recursion. Contributed by @clokep. ([\#17023](https://github.com/element-hq/synapse/issues/17023))
-- Add support for moving `/pushrules` off of the main process. ([\#17037](https://github.com/element-hq/synapse/issues/17037), [\#17038](https://github.com/element-hq/synapse/issues/17038))

-### Bugfixes
-
-- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16930](https://github.com/element-hq/synapse/issues/16930), [\#16932](https://github.com/element-hq/synapse/issues/16932), [\#16942](https://github.com/element-hq/synapse/issues/16942), [\#17064](https://github.com/element-hq/synapse/issues/17064), [\#17065](https://github.com/element-hq/synapse/issues/17065), [\#17066](https://github.com/element-hq/synapse/issues/17066))
-- Fix server notice rooms not always being created as unencrypted rooms, even when `encryption_enabled_by_default_for_room_type` is in use (server notices are always unencrypted). ([\#17033](https://github.com/element-hq/synapse/issues/17033))
-- Fix the `.m.rule.encrypted_room_one_to_one` and `.m.rule.room_one_to_one` default underride push rules being in the wrong order. Contributed by @Sumpy1. ([\#17043](https://github.com/element-hq/synapse/issues/17043))

-### Internal Changes
-
-- Refactor auth chain fetching to reduce duplication. ([\#17044](https://github.com/element-hq/synapse/issues/17044))
-- Improve database performance by adding a missing index to `access_tokens.refresh_token_id`. ([\#17045](https://github.com/element-hq/synapse/issues/17045), [\#17054](https://github.com/element-hq/synapse/issues/17054))
-- Improve database performance by reducing the number of receipts fetched when sending push notifications. ([\#17049](https://github.com/element-hq/synapse/issues/17049))
-
-### Updates to locked dependencies
-
-* Bump packaging from 23.2 to 24.0. ([\#17027](https://github.com/element-hq/synapse/issues/17027))
-* Bump regex from 1.10.3 to 1.10.4. ([\#17028](https://github.com/element-hq/synapse/issues/17028))
-* Bump ruff from 0.3.2 to 0.3.5. ([\#17060](https://github.com/element-hq/synapse/issues/17060))
-* Bump serde_json from 1.0.114 to 1.0.115. ([\#17041](https://github.com/element-hq/synapse/issues/17041))
-* Bump types-pillow from 10.2.0.20240125 to 10.2.0.20240406. ([\#17061](https://github.com/element-hq/synapse/issues/17061))
-* Bump types-requests from 2.31.0.20240125 to 2.31.0.20240406. ([\#17063](https://github.com/element-hq/synapse/issues/17063))
-* Bump typing-extensions from 4.9.0 to 4.11.0. ([\#17062](https://github.com/element-hq/synapse/issues/17062))

-# Synapse 1.104.0 (2024-04-02)
-
-### Bugfixes
-
-- Fix a regression when using an OIDC provider. Introduced in v1.104.0rc1. ([\#17031](https://github.com/element-hq/synapse/issues/17031))

-# Synapse 1.104.0rc1 (2024-03-26)
-
-### Features
-
-- Add an OIDC config to specify extra parameters for the authorization grant URL. It can be useful, for example, to pass an ACR value. ([\#16971](https://github.com/element-hq/synapse/issues/16971))
-- Add support for an OIDC provider returning a JWT. ([\#16972](https://github.com/element-hq/synapse/issues/16972), [\#17031](https://github.com/element-hq/synapse/issues/17031))

-### Bugfixes
-
-- Fix a bug which meant that, under certain circumstances, we might never retry sending events or to-device messages over federation after a failure. ([\#16925](https://github.com/element-hq/synapse/issues/16925))
-- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16949](https://github.com/element-hq/synapse/issues/16949))
-- Fix a case in which the `m.fully_read` marker would not get updated. Contributed by @SpiritCroc. ([\#16990](https://github.com/element-hq/synapse/issues/16990))
-- Fix a bug which did not retract a user's pending knocks at rooms when their account was deactivated. Contributed by @hanadi92. ([\#17010](https://github.com/element-hq/synapse/issues/17010))
+- Add an index to the sliding sync ([MSC4186](https://github.com/matrix-org/matrix-doc/pull/4186)) membership snapshot table, to fix a performance issue. ([\#18074](https://github.com/element-hq/synapse/issues/18074))

### Updates to the Docker image

-- Updated `start.py` to generate config using the correct user ID when running as root (fixes [\#16824](https://github.com/element-hq/synapse/issues/16824), [\#15202](https://github.com/element-hq/synapse/issues/15202)). ([\#16978](https://github.com/element-hq/synapse/issues/16978))
+- Specify the architecture of installed packages via an APT config option, which is more reliable than appending package names with `:{arch}`. ([\#18271](https://github.com/element-hq/synapse/issues/18271))
+- Always specify base image Debian versions with a build argument. ([\#18272](https://github.com/element-hq/synapse/issues/18272))
+- Allow passing arguments to `start_for_complement.sh` (to be sent to `configure_workers_and_start.py`). ([\#18273](https://github.com/element-hq/synapse/issues/18273))
+- Make some improvements to the `prefix-log` script in the workers image. ([\#18274](https://github.com/element-hq/synapse/issues/18274))
+- Use `uv pip` to install `supervisor` in the worker image. ([\#18275](https://github.com/element-hq/synapse/issues/18275))
+- Avoid needing to download & use `rsync` in a build layer. ([\#18287](https://github.com/element-hq/synapse/issues/18287))

### Improved Documentation

-- Add a query to force a refresh of a remote user's device list to the "Useful SQL for Admins" documentation page. ([\#16892](https://github.com/element-hq/synapse/issues/16892))
-- Minor grammatical corrections to the upgrade documentation. ([\#16965](https://github.com/element-hq/synapse/issues/16965))
-- Fix the sort order for the documentation version picker, so that newer releases appear above older ones. ([\#16966](https://github.com/element-hq/synapse/issues/16966))
-- Remove the recommendation for a specific poetry version from the contributing guide. ([\#17002](https://github.com/element-hq/synapse/issues/17002))
+- Fix the instructions on how to obtain an access token, and update naming from Riot to Element. ([\#18225](https://github.com/element-hq/synapse/issues/18225))
+- Correct a small typo in the SSO mapping providers documentation. ([\#18276](https://github.com/element-hq/synapse/issues/18276))
+- Add docs for how to clear out the Poetry wheel cache. ([\#18283](https://github.com/element-hq/synapse/issues/18283))

### Internal Changes

-- Improve lock performance when a lot of locks are all waiting for a single lock to be released. ([\#16840](https://github.com/element-hq/synapse/issues/16840))
-- Update the power level default for public rooms. ([\#16907](https://github.com/element-hq/synapse/issues/16907))
-- Improve event validation. ([\#16908](https://github.com/element-hq/synapse/issues/16908))
-- Multi-worker Docker container: disable log buffering. ([\#16919](https://github.com/element-hq/synapse/issues/16919))
-- Refactor state delta calculation in the `/sync` handler. ([\#16929](https://github.com/element-hq/synapse/issues/16929))
-- Clarify docs for some room state functions. ([\#16950](https://github.com/element-hq/synapse/issues/16950))
-- Specify IP subnets in canonical form. ([\#16953](https://github.com/element-hq/synapse/issues/16953))
-- As was done for the SAML mapping provider, pass the module API to the OIDC one so the mapper can do more logic in its code. ([\#16974](https://github.com/element-hq/synapse/issues/16974))
-- Allow containers building on top of Synapse's Complement container to use the included PostgreSQL cluster. ([\#16985](https://github.com/element-hq/synapse/issues/16985))
-- Raise the poetry-core version cap to 1.9.0. ([\#16986](https://github.com/element-hq/synapse/issues/16986))
-- Patch the db conn pool sooner in tests. ([\#17017](https://github.com/element-hq/synapse/issues/17017))
+- Add a column `participant` to the `room_memberships` table. ([\#18068](https://github.com/element-hq/synapse/issues/18068))
+- Update Poetry to 2.1.1, including updating the lock file version. ([\#18251](https://github.com/element-hq/synapse/issues/18251))
+- Pin GitHub Actions dependencies by commit hash. ([\#18255](https://github.com/element-hq/synapse/issues/18255))
+- Add a DB delta to remove the old state group deletion job. ([\#18284](https://github.com/element-hq/synapse/issues/18284))

### Updates to locked dependencies

-* Bump anyhow from 1.0.80 to 1.0.81. ([\#17009](https://github.com/element-hq/synapse/issues/17009))
-* Bump black from 23.10.1 to 24.2.0. ([\#16936](https://github.com/element-hq/synapse/issues/16936))
-* Bump cryptography from 41.0.7 to 42.0.5. ([\#16958](https://github.com/element-hq/synapse/issues/16958))
-* Bump dawidd6/action-download-artifact from 3.1.1 to 3.1.2. ([\#16960](https://github.com/element-hq/synapse/issues/16960))
-* Bump dawidd6/action-download-artifact from 3.1.2 to 3.1.4. ([\#17008](https://github.com/element-hq/synapse/issues/17008))
-* Bump jinja2 from 3.1.2 to 3.1.3. ([\#17005](https://github.com/element-hq/synapse/issues/17005))
-* Bump log from 0.4.20 to 0.4.21. ([\#16977](https://github.com/element-hq/synapse/issues/16977))
-* Bump mypy from 1.5.1 to 1.8.0. ([\#16901](https://github.com/element-hq/synapse/issues/16901))
-* Bump netaddr from 0.9.0 to 1.2.1. ([\#17006](https://github.com/element-hq/synapse/issues/17006))
-* Bump pydantic from 2.6.0 to 2.6.4. ([\#17004](https://github.com/element-hq/synapse/issues/17004))
-* Bump pyo3 from 0.20.2 to 0.20.3. ([\#16962](https://github.com/element-hq/synapse/issues/16962))
-* Bump ruff from 0.1.14 to 0.3.2. ([\#16994](https://github.com/element-hq/synapse/issues/16994))
-* Bump serde from 1.0.196 to 1.0.197. ([\#16963](https://github.com/element-hq/synapse/issues/16963))
-* Bump serde_json from 1.0.113 to 1.0.114. ([\#16961](https://github.com/element-hq/synapse/issues/16961))
-* Bump types-jsonschema from 4.21.0.20240118 to 4.21.0.20240311. ([\#17007](https://github.com/element-hq/synapse/issues/17007))
-* Bump types-psycopg2 from 2.9.21.16 to 2.9.21.20240311. ([\#16995](https://github.com/element-hq/synapse/issues/16995))
-* Bump types-pyopenssl from 23.3.0.0 to 24.0.0.20240311. ([\#17003](https://github.com/element-hq/synapse/issues/17003))
+* Bump actions/add-to-project from f5473ace9aeee8b97717b281e26980aa5097023f to 280af8ae1f83a494cfad2cb10f02f6d13529caa9. ([\#18303](https://github.com/element-hq/synapse/issues/18303))
+* Bump actions/cache from 4.2.2 to 4.2.3. ([\#18266](https://github.com/element-hq/synapse/issues/18266))
+* Bump actions/download-artifact from 4.2.0 to 4.2.1. ([\#18268](https://github.com/element-hq/synapse/issues/18268))
+* Bump actions/setup-python from 5.4.0 to 5.5.0. ([\#18298](https://github.com/element-hq/synapse/issues/18298))
+* Bump actions/upload-artifact from 4.6.1 to 4.6.2. ([\#18304](https://github.com/element-hq/synapse/issues/18304))
+* Bump authlib from 1.4.1 to 1.5.1. ([\#18306](https://github.com/element-hq/synapse/issues/18306))
+* Bump dawidd6/action-download-artifact from 8 to 9. ([\#18204](https://github.com/element-hq/synapse/issues/18204))
+* Bump jinja2 from 3.1.5 to 3.1.6. ([\#18223](https://github.com/element-hq/synapse/issues/18223))
+* Bump log from 0.4.26 to 0.4.27. ([\#18267](https://github.com/element-hq/synapse/issues/18267))
+* Bump phonenumbers from 8.13.50 to 9.0.2. ([\#18299](https://github.com/element-hq/synapse/issues/18299))
+* Bump pygithub from 2.5.0 to 2.6.1. ([\#18243](https://github.com/element-hq/synapse/issues/18243))
+* Bump pyo3-log from 0.12.1 to 0.12.2. ([\#18269](https://github.com/element-hq/synapse/issues/18269))

-# Synapse 1.103.0 (2024-03-19)
+# Synapse 1.127.1 (2025-03-26)

-No significant changes since 1.103.0rc1.
+## Security
+- Fix [CVE-2025-30355](https://www.cve.org/CVERecord?id=CVE-2025-30355) / [GHSA-v56r-hwv5-mxg6](https://github.com/element-hq/synapse/security/advisories/GHSA-v56r-hwv5-mxg6). **High severity vulnerability affecting federation. The vulnerability has been exploited in the wild.**

+# Synapse 1.127.0 (2025-03-25)

-# Synapse 1.103.0rc1 (2024-03-12)
+No significant changes since 1.127.0rc1.

### Features

-- Add a new [List Accounts v3](https://element-hq.github.io/synapse/v1.103/admin_api/user_admin_api.html#list-accounts-v3) Admin API with improved deactivated user filtering capabilities. ([\#16874](https://github.com/element-hq/synapse/issues/16874))
-- Include the `Retry-After` header by default per [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041) (see the sketch below). Contributed by @clokep. ([\#16947](https://github.com/element-hq/synapse/issues/16947))
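A hedged client-side sketch of honoring the `Retry-After` header that MSC4041 describes (entry above). This illustrates the header's intended use; it is not code from Synapse:

```python
import time

import requests


def post_with_retry(url: str, json_body: dict, max_attempts: int = 5) -> requests.Response:
    """POST, sleeping for Retry-After seconds when rate-limited (HTTP 429)."""
    for _ in range(max_attempts):
        resp = requests.post(url, json=json_body, timeout=10)
        if resp.status_code != 429:
            return resp
        # Per MSC4041, the server advertises the wait via Retry-After (seconds).
        delay = int(resp.headers.get("Retry-After", "1"))
        time.sleep(delay)
    return resp  # still rate-limited after max_attempts
```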
### Bugfixes

-- Fix joining remote rooms when a module uses the `on_new_event` callback. This callback may now pass partial state events instead of the full state for remote rooms. Introduced in v1.76.0. ([\#16973](https://github.com/element-hq/synapse/issues/16973))
-- Fix a performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. Contributed by @ggogel. ([\#16968](https://github.com/element-hq/synapse/issues/16968))

-### Improved Documentation
-
-- Add an HAProxy example for single-port operation to the reverse proxy documentation. Contributed by Georg Pfuetzenreuter (@tacerus). ([\#16768](https://github.com/element-hq/synapse/issues/16768))
-- Improve the documentation around running Complement tests with new configuration parameters. ([\#16946](https://github.com/element-hq/synapse/issues/16946))
-- Add docs on upgrading from a very old version. ([\#16951](https://github.com/element-hq/synapse/issues/16951))

-### Updates to locked dependencies
-
-* Bump JasonEtco/create-an-issue from 2.9.1 to 2.9.2. ([\#16934](https://github.com/element-hq/synapse/issues/16934))
-* Bump anyhow from 1.0.79 to 1.0.80. ([\#16935](https://github.com/element-hq/synapse/issues/16935))
-* Bump dawidd6/action-download-artifact from 3.0.0 to 3.1.1. ([\#16933](https://github.com/element-hq/synapse/issues/16933))
-* Bump furo from 2023.9.10 to 2024.1.29. ([\#16939](https://github.com/element-hq/synapse/issues/16939))
-* Bump pyopenssl from 23.3.0 to 24.0.0. ([\#16937](https://github.com/element-hq/synapse/issues/16937))
-* Bump types-netaddr from 0.10.0.20240106 to 1.2.0.20240219. ([\#16938](https://github.com/element-hq/synapse/issues/16938))

-# Synapse 1.102.0 (2024-03-05)
-
-### Bugfixes
-
-- Revert https://github.com/element-hq/synapse/pull/16756, which caused incorrect notification counts on mobile clients since v1.100.0. ([\#16979](https://github.com/element-hq/synapse/issues/16979))

-# Synapse 1.102.0rc1 (2024-02-20)
+# Synapse 1.127.0rc1 (2025-03-18)

### Features

-- A metric was added for emails sent by Synapse, broken down by type: `synapse_emails_sent_total`. Contributed by Remi Rampin. ([\#16881](https://github.com/element-hq/synapse/issues/16881))

-### Bugfixes
-
-- Do not send multiple concurrent requests for keys for the same server. ([\#16894](https://github.com/element-hq/synapse/issues/16894))
-- Fix a performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. ([\#16903](https://github.com/element-hq/synapse/issues/16903))
-- Always prefer the unthreaded receipt when more than one exists ([MSC4102](https://github.com/matrix-org/matrix-spec-proposals/pull/4102)). ([\#16927](https://github.com/element-hq/synapse/issues/16927))

-### Improved Documentation
-
-- Fix a small typo in the Rooms section of the Admin API documentation. Contributed by @RainerZufall187. ([\#16857](https://github.com/element-hq/synapse/issues/16857))

-### Internal Changes
-
-- Don't invalidate the entire event cache when we purge history. ([\#16905](https://github.com/element-hq/synapse/issues/16905))
-- Add an experimental config option to not send device list updates for specific users. ([\#16909](https://github.com/element-hq/synapse/issues/16909))
-- Fix an incorrect Docker Hub link in the release script. ([\#16910](https://github.com/element-hq/synapse/issues/16910))

-### Updates to locked dependencies
-
-* Bump attrs from 23.1.0 to 23.2.0. ([\#16899](https://github.com/element-hq/synapse/issues/16899))
-* Bump bcrypt from 4.0.1 to 4.1.2. ([\#16900](https://github.com/element-hq/synapse/issues/16900))
-* Bump pygithub from 2.1.1 to 2.2.0. ([\#16902](https://github.com/element-hq/synapse/issues/16902))
-* Bump sentry-sdk from 1.40.0 to 1.40.3. ([\#16898](https://github.com/element-hq/synapse/issues/16898))

-# Synapse 1.101.0 (2024-02-13)
-
-### Bugfixes
-
-- Fix a performance regression when fetching auth chains from the DB. Introduced in v1.100.0. ([\#16893](https://github.com/element-hq/synapse/issues/16893))

-# Synapse 1.101.0rc1 (2024-02-06)
+- Update the [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140) implementation to no longer cancel a user's own delayed state events with an event type & state key that match a more recent state event sent by that user. ([\#17810](https://github.com/element-hq/synapse/issues/17810))

### Improved Documentation

-- Fix broken links in the documentation. ([\#16853](https://github.com/element-hq/synapse/issues/16853))
-- Update the macOS installation instructions to mention that libicu is optional. ([\#16854](https://github.com/element-hq/synapse/issues/16854))
-- The version picker now correctly lists versions after `v1.98.0`. ([\#16880](https://github.com/element-hq/synapse/issues/16880))
+- Fixed a minor typo in the Synapse documentation. Contributed by @karuto12. ([\#18224](https://github.com/element-hq/synapse/issues/18224))

### Internal Changes

-- Add support for the stabilised [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981) that adds a `recurse` parameter on the `/relations` API. ([\#16842](https://github.com/element-hq/synapse/issues/16842))
+- Remove the undocumented `SYNAPSE_USE_FROZEN_DICTS` environment variable. ([\#18123](https://github.com/element-hq/synapse/issues/18123))
+- Fix detection of workflow failures in the release script. ([\#18211](https://github.com/element-hq/synapse/issues/18211))
+- Add caching support to media endpoints. ([\#18235](https://github.com/element-hq/synapse/issues/18235))

### Updates to locked dependencies

-* Bump dorny/paths-filter from 2 to 3. ([\#16869](https://github.com/element-hq/synapse/issues/16869))
-* Bump gitpython from 3.1.40 to 3.1.41. ([\#16850](https://github.com/element-hq/synapse/issues/16850))
-* Bump hiredis from 2.2.3 to 2.3.2. ([\#16862](https://github.com/element-hq/synapse/issues/16862))
-* Bump jsonschema from 4.20.0 to 4.21.1. ([\#16887](https://github.com/element-hq/synapse/issues/16887))
-* Bump lxml-stubs from 0.4.0 to 0.5.1. ([\#16885](https://github.com/element-hq/synapse/issues/16885))
-* Bump mypy-zope from 1.0.1 to 1.0.3. ([\#16865](https://github.com/element-hq/synapse/issues/16865))
-* Bump phonenumbers from 8.13.26 to 8.13.29. ([\#16868](https://github.com/element-hq/synapse/issues/16868))
-* Bump pydantic from 2.5.3 to 2.6.0. ([\#16888](https://github.com/element-hq/synapse/issues/16888))
-* Bump sentry-sdk from 1.39.1 to 1.40.0. ([\#16889](https://github.com/element-hq/synapse/issues/16889))
-* Bump serde from 1.0.195 to 1.0.196. ([\#16867](https://github.com/element-hq/synapse/issues/16867))
-* Bump serde_json from 1.0.111 to 1.0.113. ([\#16866](https://github.com/element-hq/synapse/issues/16866))
-* Bump sigstore/cosign-installer from 3.3.0 to 3.4.0. ([\#16890](https://github.com/element-hq/synapse/issues/16890))
-* Bump types-pillow from 10.1.0.2 to 10.2.0.20240125. ([\#16864](https://github.com/element-hq/synapse/issues/16864))
-* Bump types-requests from 2.31.0.10 to 2.31.0.20240125. ([\#16886](https://github.com/element-hq/synapse/issues/16886))
-* Bump types-setuptools from 69.0.0.0 to 69.0.0.20240125. ([\#16863](https://github.com/element-hq/synapse/issues/16863))
+* Bump anyhow from 1.0.96 to 1.0.97. ([\#18201](https://github.com/element-hq/synapse/issues/18201))
+* Bump bcrypt from 4.2.1 to 4.3.0. ([\#18207](https://github.com/element-hq/synapse/issues/18207))
+* Bump bytes from 1.10.0 to 1.10.1. ([\#18227](https://github.com/element-hq/synapse/issues/18227))
+* Bump http from 1.2.0 to 1.3.1. ([\#18245](https://github.com/element-hq/synapse/issues/18245))
+* Bump sentry-sdk from 2.19.2 to 2.22.0. ([\#18205](https://github.com/element-hq/synapse/issues/18205))
+* Bump serde from 1.0.218 to 1.0.219. ([\#18228](https://github.com/element-hq/synapse/issues/18228))
+* Bump serde_json from 1.0.139 to 1.0.140. ([\#18202](https://github.com/element-hq/synapse/issues/18202))
+* Bump ulid from 1.2.0 to 1.2.1. ([\#18246](https://github.com/element-hq/synapse/issues/18246))

-# Synapse 1.100.0 (2024-01-30)
+# Synapse 1.126.0 (2025-03-11)

+Administrators using the Debian/Ubuntu packages from `packages.matrix.org`, please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/release-v1.126/docs/upgrade.md#change-of-signing-key-expiry-date-for-the-debianubuntu-package-repository) as we have recently updated the expiry date on the repository's GPG signing key. The old version of the key will expire on `2025-03-15`.

-No significant changes since 1.100.0rc3.
+No significant changes since 1.126.0rc3.

-# Synapse 1.100.0rc3 (2024-01-24)
+# Synapse 1.126.0rc3 (2025-03-07)

### Bugfixes

-- Fix a database performance regression due to changing Postgres table statistics. Introduced in v1.100.0rc1. ([\#16849](https://github.com/element-hq/synapse/issues/16849))

-# Synapse 1.100.0rc2 (2024-01-24)
-
-This version is the same as 1.100.0rc1 but with fixes to the release process.
-
-### Internal Changes
-
-- Downgrade the `download-artifact` and `upload-artifact` actions to v3 due to breaking changes. ([\#16847](https://github.com/element-hq/synapse/issues/16847))

-# Synapse 1.100.0rc1 (2024-01-23)
-
-*This version was never released to PyPI or the Debian repository due to failures in the automatic part of the release process.*
+- Revert the background job to clear unreferenced state groups (that was introduced in v1.126.0rc1), due to [a suspected issue](https://github.com/element-hq/synapse/issues/18217) that causes increased disk usage. ([\#18222](https://github.com/element-hq/synapse/issues/18222))

-### Features
-
-- Advertise experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) through `/_matrix/client/versions` if enabled. Contributed by @hanadi92. ([\#16787](https://github.com/element-hq/synapse/issues/16787))

### Bugfixes

-- Handle wildcard type filters properly for the room messages endpoint. Contributed by Mo Balaa. ([\#14984](https://github.com/element-hq/synapse/issues/14984))

### Improved Documentation
+# Synapse 1.126.0rc2 (2025-03-05)

-- Add a link to the "Request log format" explainer on the "Logging sample config" documentation page. ([\#16778](https://github.com/element-hq/synapse/issues/16778))
-- Fix broken links in issue templates and documentation. ([\#16810](https://github.com/element-hq/synapse/issues/16810))
-- Note the deprecation of the NGINX `listen ... http2` directive in the reverse proxy documentation template. ([\#16831](https://github.com/element-hq/synapse/issues/16831))

### Internal Changes

-- Faster partial join to room with complex auth graph. ([\#7](https://github.com/element-hq/synapse/issues/7))
-- Improve DB performance of calculating badge counts for push. ([\#16756](https://github.com/element-hq/synapse/issues/16756))
-- Split up deleting devices into batches. ([\#16766](https://github.com/element-hq/synapse/issues/16766))
-- Remove the CI check for sign-off, as we require a CLA signature instead. ([\#16776](https://github.com/element-hq/synapse/issues/16776))
-- Ensure CI fails when linting fails, to make sure auto-merge does the correct thing. ([\#16781](https://github.com/element-hq/synapse/issues/16781))
-- Load recents faster for sync by reducing the amount of state pulled out. ([\#16783](https://github.com/element-hq/synapse/issues/16783))
-- Reduce the amount of state pulled out when querying the federation hierarchy. ([\#16785](https://github.com/element-hq/synapse/issues/16785))
-- Pull less state out of the DB when we retry fetching old events during backfill. ([\#16788](https://github.com/element-hq/synapse/issues/16788))
-- Optimize the query for fetching to-device messages in `/sync`. ([\#16805](https://github.com/element-hq/synapse/issues/16805))
-- Reject OIDC config when `client_secret` isn't specified but the auth method requires one (see the sketch after this list). ([\#16806](https://github.com/element-hq/synapse/issues/16806))
-- Allow room creation, but not publishing, to continue if room publication rules are violated when creating a new room. ([\#16811](https://github.com/element-hq/synapse/issues/16811))
-- Bump the minimum supported Rust version to 1.65.0. ([\#16818](https://github.com/element-hq/synapse/issues/16818))
-- Fix up copyright lines in file headers after the licensing change. ([\#16820](https://github.com/element-hq/synapse/issues/16820))
-- Add a `--generate-only` option to the internal configuration/launch script for Complement. ([\#16828](https://github.com/element-hq/synapse/issues/16828))
-- Preparatory work for tweaking the performance of auth chain lookups. ([\#16833](https://github.com/element-hq/synapse/issues/16833))
-- Speed up e2e device keys queries for bot accounts. ([\#16841](https://github.com/element-hq/synapse/issues/16841))
+- Fix the wheel-building configuration in CI by installing libatomic1. ([\#18212](https://github.com/element-hq/synapse/issues/18212), [\#18213](https://github.com/element-hq/synapse/issues/18213))
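A hedged sketch of the kind of validation described by the OIDC `client_secret` entry above. The auth-method names follow the standard OIDC token-endpoint auth methods, and `client_auth_method`/`client_secret` are Synapse OIDC option names, but the function and config shape here are illustrative, not Synapse's actual code:

```python
# Token-endpoint auth methods that require a client secret (standard OIDC names).
SECRET_AUTH_METHODS = {"client_secret_basic", "client_secret_post", "client_secret_jwt"}


def check_oidc_config(config: dict) -> None:
    """Reject OIDC config lacking a client_secret when the auth method needs one."""
    auth_method = config.get("client_auth_method", "client_secret_basic")
    if auth_method in SECRET_AUTH_METHODS and not config.get("client_secret"):
        raise ValueError(
            f"OIDC config: client_auth_method {auth_method!r} requires a client_secret"
        )


# Example: the default method needs a secret, so this config is rejected.
try:
    check_oidc_config({"issuer": "https://idp.example.com", "client_id": "synapse"})
except ValueError as e:
    print(e)
```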
### Updates to locked dependencies
+# Synapse 1.126.0rc1 (2025-03-04)

-* Bump actions/cache from 3 to 4. ([\#16832](https://github.com/element-hq/synapse/issues/16832))
-* Bump actions/download-artifact from 3 to 4. ([\#16795](https://github.com/element-hq/synapse/issues/16795))
-* Bump actions/upload-artifact from 3 to 4. ([\#16796](https://github.com/element-hq/synapse/issues/16796))
-* Bump anyhow from 1.0.75 to 1.0.79. ([\#16789](https://github.com/element-hq/synapse/issues/16789))
-* Bump authlib from 1.2.1 to 1.3.0. ([\#16801](https://github.com/element-hq/synapse/issues/16801))
-* Bump dawidd6/action-download-artifact from 2.28.0 to 3.0.0. ([\#16794](https://github.com/element-hq/synapse/issues/16794))
-* Bump immutabledict from 4.0.0 to 4.1.0. ([\#16812](https://github.com/element-hq/synapse/issues/16812))
-* Bump isort from 5.13.1 to 5.13.2. ([\#16835](https://github.com/element-hq/synapse/issues/16835))
-* Bump lxml from 4.9.3 to 5.1.0. ([\#16813](https://github.com/element-hq/synapse/issues/16813))
-* Bump pillow from 10.1.0 to 10.2.0. ([\#16802](https://github.com/element-hq/synapse/issues/16802))
-* Bump pydantic from 2.5.2 to 2.5.3. ([\#16836](https://github.com/element-hq/synapse/issues/16836))
-* Bump pyo3 from 0.20.0 to 0.20.2. ([\#16791](https://github.com/element-hq/synapse/issues/16791))
-* Bump regex from 1.9.6 to 1.10.3. ([\#16837](https://github.com/element-hq/synapse/issues/16837))
-* Bump ruff from 0.1.13 to 0.1.14. ([\#16838](https://github.com/element-hq/synapse/issues/16838))
-* Bump ruff from 0.1.7 to 0.1.13. ([\#16814](https://github.com/element-hq/synapse/issues/16814))
-* Bump sentry-sdk from 1.35.0 to 1.39.1. ([\#16799](https://github.com/element-hq/synapse/issues/16799))
-* Bump serde_json from 1.0.108 to 1.0.111. ([\#16792](https://github.com/element-hq/synapse/issues/16792))
-* Bump service-identity from 23.1.0 to 24.1.0. ([\#16816](https://github.com/element-hq/synapse/issues/16816))
-* Bump types-commonmark from 0.9.2.4 to 0.9.2.20240106. ([\#16797](https://github.com/element-hq/synapse/issues/16797))
-* Bump types-jsonschema from 4.20.0.0 to 4.20.0.20240105. ([\#16800](https://github.com/element-hq/synapse/issues/16800))
-* Bump types-jsonschema from 4.20.0.20240105 to 4.21.0.20240118. ([\#16834](https://github.com/element-hq/synapse/issues/16834))
-* Bump types-netaddr from 0.9.0.1 to 0.10.0.20240106. ([\#16839](https://github.com/element-hq/synapse/issues/16839))
-* Bump typing-extensions from 4.8.0 to 4.9.0. ([\#16815](https://github.com/element-hq/synapse/issues/16815))

-# Synapse 1.99.0 (2024-01-16)
-
-Synapse 1.99.0 is the first Synapse release under an AGPLv3.0 licence (with CLA to enable Element to sell AGPL exceptions). You can read more about this here:
-
- - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/
- - https://element.io/blog/element-to-adopt-agplv3/
- - https://element.io/blog/synapse-now-lives-at-github-com-element-hq-synapse/
-
-No significant changes since 1.99.0rc1.

-# Synapse 1.99.0rc1 (2024-01-09)
+Synapse 1.126.0rc1 was not fully released due to an error in CI.

### Features

-- Add [config options](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#server_notices) to set the avatar and the topic of the server notices room, as well as the avatar of the server notices user. ([\#16679](https://github.com/matrix-org/synapse/issues/16679))
-- Add the config option [`email.notif_delay_before_mail`](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#email) to tweak the delay before an email is sent following a notification. ([\#16696](https://github.com/matrix-org/synapse/issues/16696))
-- Add a new configuration option [`sentry.environment`](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#sentry) for improved system monitoring. Contributed by @zeeshanrafiqrana. ([\#16738](https://github.com/matrix-org/synapse/issues/16738))
-- Filter out rooms from the room directory being served to other homeservers when those rooms block that homeserver by their Access Control Lists. ([\#16759](https://github.com/element-hq/synapse/issues/16759))
+- Define ratelimit configuration for delayed event management. ([\#18019](https://github.com/element-hq/synapse/issues/18019))
+- Add the `form_secret_path` config option (see the sketch after this list). ([\#18090](https://github.com/element-hq/synapse/issues/18090))
+- Add the `--no-secrets-in-config` command line option. ([\#18092](https://github.com/element-hq/synapse/issues/18092))
+- Add a background job to clear unreferenced state groups. ([\#18154](https://github.com/element-hq/synapse/issues/18154))
+- Add support for specifying/overriding `id_token_signing_alg_values_supported` for an OpenID identity provider. ([\#18177](https://github.com/element-hq/synapse/issues/18177))
+- Add the `worker_replication_secret_path` config option. ([\#18191](https://github.com/element-hq/synapse/issues/18191))
+- Add support for specifying/overriding `redirect_uri` in the authorization and token requests against an OpenID identity provider. ([\#18197](https://github.com/element-hq/synapse/issues/18197))
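For the new `*_path` secret options above (`form_secret_path`, `worker_replication_secret_path`), a minimal sketch of the pattern they imply — reading the secret from a file at startup so it can live outside the main config file. The path is a placeholder and this is an illustration, not Synapse's loader:

```python
from pathlib import Path


def read_secret(path: str) -> str:
    """Read a secret from a file, stripping surrounding whitespace/newline."""
    return Path(path).read_text(encoding="utf-8").strip()


# e.g. form_secret_path: /run/secrets/synapse_form_secret (hypothetical path)
form_secret = read_secret("/run/secrets/synapse_form_secret")
```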
### Bugfixes

-- Fix a long-standing bug where the signing keys generated by Synapse were world-readable. Contributed by Fabian Klemp. ([\#16740](https://github.com/matrix-org/synapse/issues/16740))
-- Fix email verification redirection. Contributed by Fadhlan Ridhwanallah. ([\#16761](https://github.com/element-hq/synapse/issues/16761))
-- Fixed a bug that prevented users from being queried by display name if it contains non-ASCII characters. ([\#16767](https://github.com/element-hq/synapse/issues/16767))
-- Allow reactivating a user without a password via the Admin API in some edge cases. ([\#16770](https://github.com/element-hq/synapse/issues/16770))
-- Add the `recursion_depth` parameter to the response of the /relations endpoint if MSC3981 recursion is being performed. ([\#16775](https://github.com/element-hq/synapse/issues/16775))
-
-### Improved Documentation
-
-- Added a version picker for the Synapse documentation. Contributed by @Dmytro27Ind. ([\#16533](https://github.com/matrix-org/synapse/issues/16533))
-- Clarify that `password_config.enabled: "only_for_reauth"` does not allow new logins to be created using password auth. ([\#16737](https://github.com/matrix-org/synapse/issues/16737))
-- Remove value from header in configuration documentation for `refresh_token_lifetime`. ([\#16763](https://github.com/element-hq/synapse/issues/16763))
-- Add another custom statistics collection server to the documentation. Contributed by @loelkes. ([\#16769](https://github.com/element-hq/synapse/issues/16769))
-
-### Internal Changes
-
-- Remove run-once workflow after adding the version picker to the documentation. ([\#9453](https://github.com/element-hq/synapse/issues/9453))
-- Update the implementation of [MSC2965](https://github.com/matrix-org/matrix-spec-proposals/pull/2965) (OIDC Provider discovery). ([\#16726](https://github.com/matrix-org/synapse/issues/16726))
-- Move the rust stubs inline for better IDE integration. ([\#16757](https://github.com/element-hq/synapse/issues/16757))
-- Fix sample config doc CI. ([\#16758](https://github.com/element-hq/synapse/issues/16758))
-- Simplify event internal metadata class. ([\#16762](https://github.com/element-hq/synapse/issues/16762), [\#16780](https://github.com/element-hq/synapse/issues/16780))
-- Sign the published docker image using [cosign](https://docs.sigstore.dev/). ([\#16774](https://github.com/element-hq/synapse/issues/16774))
-- Port `EventInternalMetadata` class to Rust. ([\#16782](https://github.com/element-hq/synapse/issues/16782))
-
+- Make sure we advertise registration as disabled when [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) is enabled. ([\#17661](https://github.com/element-hq/synapse/issues/17661))
+- Prevent suspended users from sending encrypted messages. ([\#18157](https://github.com/element-hq/synapse/issues/18157))
+- Clean up deleted state group references. ([\#18165](https://github.com/element-hq/synapse/issues/18165))
+- Fix [MSC4108 QR-code login](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) not working with some reverse-proxy setups. ([\#18178](https://github.com/element-hq/synapse/issues/18178))
+- Support device IDs that can't be represented in a scope when delegating auth to Matrix Authentication Service 0.15.0+. ([\#18174](https://github.com/element-hq/synapse/issues/18174))
+### Updates to the Docker image
-### Updates to locked dependencies
-
-* Bump actions/setup-go from 4 to 5. ([\#16749](https://github.com/matrix-org/synapse/issues/16749))
-* Bump actions/setup-python from 4 to 5. ([\#16748](https://github.com/matrix-org/synapse/issues/16748))
-* Bump immutabledict from 3.0.0 to 4.0.0. ([\#16743](https://github.com/matrix-org/synapse/issues/16743))
-* Bump isort from 5.12.0 to 5.13.0. ([\#16745](https://github.com/matrix-org/synapse/issues/16745))
-* Bump isort from 5.13.0 to 5.13.1. ([\#16752](https://github.com/matrix-org/synapse/issues/16752))
-* Bump pydantic from 2.5.1 to 2.5.2. ([\#16747](https://github.com/matrix-org/synapse/issues/16747))
-* Bump ruff from 0.1.6 to 0.1.7. ([\#16746](https://github.com/matrix-org/synapse/issues/16746))
-* Bump types-setuptools from 68.2.0.2 to 69.0.0.0. ([\#16744](https://github.com/matrix-org/synapse/issues/16744))
-
-# Synapse 1.98.0 (2023-12-12)
-
-Synapse 1.98.0 will be the last Synapse release in 2023; the regular release cadence will resume in January 2024.
-
-Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for
-proprietary dual licensing). You can read more about this here:
-
- - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/
- - https://element.io/blog/element-to-adopt-agplv3/
-
-The Matrix.org Foundation copy of the project will be archived. Any changes needed
-by server administrators will be communicated via our usual announcements channels,
-but we are striving to make this as seamless as possible.
-
-No significant changes since 1.98.0rc1.
-
-
-# Synapse 1.98.0rc1 (2023-12-05)
-
-### Features
-
-- Synapse now declares support for Matrix v1.7, v1.8, and v1.9. ([\#16707](https://github.com/matrix-org/synapse/issues/16707))
-- Add `on_user_login` [module API](https://matrix-org.github.io/synapse/latest/modules/writing_a_module.html) callback for when a user logs in. ([\#15207](https://github.com/matrix-org/synapse/issues/15207))
-- Support [MSC4069: Inhibit profile propagation](https://github.com/matrix-org/matrix-spec-proposals/pull/4069). ([\#16636](https://github.com/matrix-org/synapse/issues/16636))
-- Restore tracking of requests and monthly active users when delegating authentication via [MSC3861](https://github.com/matrix-org/synapse/pull/16672) to an OIDC provider. ([\#16672](https://github.com/matrix-org/synapse/issues/16672))
-- Add an autojoin setting for server notices rooms, so users may be joined directly instead of receiving an invite. ([\#16699](https://github.com/matrix-org/synapse/issues/16699))
-- Follow redirects when downloading media over federation (per [MSC3860](https://github.com/matrix-org/matrix-spec-proposals/pull/3860)). ([\#16701](https://github.com/matrix-org/synapse/issues/16701))
-
-### Bugfixes
-
-- Enable refreshable tokens on the admin registration endpoint. ([\#16642](https://github.com/matrix-org/synapse/issues/16642))
-- Consistently bypass rate limits when using the server notice admin API. ([\#16670](https://github.com/matrix-org/synapse/issues/16670))
-- Fix a bug introduced in Synapse 1.7.2 where rooms whose power levels lacked an `events` field could not be upgraded. ([\#16725](https://github.com/matrix-org/synapse/issues/16725))
-- Fix `GET /_synapse/admin/v1/federation/destinations` [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) returning null (instead of 0) for `retry_last_ts` and `retry_interval`. ([\#16729](https://github.com/matrix-org/synapse/issues/16729))
+- Speed up the building of the Docker image. ([\#18038](https://github.com/element-hq/synapse/issues/18038))

### Improved Documentation

-- Add schema rollback information to documentation. ([\#16661](https://github.com/matrix-org/synapse/issues/16661))
-- Fix poetry version typo in the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16695](https://github.com/matrix-org/synapse/issues/16695))
-- Switch the example UNIX socket paths to `/run`. Add HAProxy example configuration for UNIX sockets. ([\#16700](https://github.com/matrix-org/synapse/issues/16700))
-- Add documentation for how to validate the configuration file with the `synapse.config` script. ([\#16714](https://github.com/matrix-org/synapse/issues/16714))
-
-### Internal Changes
+- Move incorrectly placed version indicator in User Event Redaction Admin API docs. ([\#18152](https://github.com/element-hq/synapse/issues/18152))
+- Document the suspension Admin API. ([\#18162](https://github.com/element-hq/synapse/issues/18162))
-
-- Clean-up unused tables. ([\#16522](https://github.com/matrix-org/synapse/issues/16522))
-- Reduce a little database load while processing state auth chains. ([\#16552](https://github.com/matrix-org/synapse/issues/16552))
-- Reduce database load of pruning old `user_ips`. ([\#16667](https://github.com/matrix-org/synapse/issues/16667))
-- Reduce DB load when forget on leave setting is disabled. ([\#16668](https://github.com/matrix-org/synapse/issues/16668))
-- Ignore `encryption_enabled_by_default_for_room_type` setting when creating server notices room, since the notices will be sent unencrypted anyway. ([\#16677](https://github.com/matrix-org/synapse/issues/16677))
-- Correctly read the to-device stream ID on startup using SQLite. ([\#16682](https://github.com/matrix-org/synapse/issues/16682))
-- Reorganise test files. ([\#16684](https://github.com/matrix-org/synapse/issues/16684))
-- Remove old full schema dumps which are no longer used. ([\#16697](https://github.com/matrix-org/synapse/issues/16697))
-- Raise poetry-core upper bound to <=1.8.1. This allows contributors to import Synapse after `poetry install`ing with Poetry 1.6 and above. Contributed by Mo Balaa. ([\#16702](https://github.com/matrix-org/synapse/issues/16702))
-- Add a workflow to try and automatically fix up linting in a PR. ([\#16704](https://github.com/matrix-org/synapse/issues/16704))
+### Deprecations and Removals
+- Disable room list publication by default. ([\#18175](https://github.com/element-hq/synapse/issues/18175))

### Updates to locked dependencies

-* Bump cryptography from 41.0.5 to 41.0.6. ([\#16703](https://github.com/matrix-org/synapse/issues/16703))
-* Bump cryptography from 41.0.6 to 41.0.7. ([\#16721](https://github.com/matrix-org/synapse/issues/16721))
-* Bump idna from 3.4 to 3.6. ([\#16720](https://github.com/matrix-org/synapse/issues/16720))
-* Bump jsonschema from 4.19.1 to 4.20.0. ([\#16692](https://github.com/matrix-org/synapse/issues/16692))
-* Bump matrix-org/netlify-pr-preview from 2 to 3. ([\#16719](https://github.com/matrix-org/synapse/issues/16719))
-* Bump phonenumbers from 8.13.23 to 8.13.26. ([\#16722](https://github.com/matrix-org/synapse/issues/16722))
-* Bump prometheus-client from 0.18.0 to 0.19.0. ([\#16691](https://github.com/matrix-org/synapse/issues/16691))
-* Bump pyasn1 from 0.5.0 to 0.5.1. ([\#16689](https://github.com/matrix-org/synapse/issues/16689))
-* Bump pydantic from 2.4.2 to 2.5.1. ([\#16663](https://github.com/matrix-org/synapse/issues/16663))
-* Bump pyo3 (0.19.2→0.20.0), pythonize (0.19.0→0.20.0) and pyo3-log (0.8.1→0.9.0). ([\#16673](https://github.com/matrix-org/synapse/issues/16673))
-* Bump pyopenssl from 23.2.0 to 23.3.0. ([\#16662](https://github.com/matrix-org/synapse/issues/16662))
-* Bump ruff from 0.1.4 to 0.1.6. ([\#16690](https://github.com/matrix-org/synapse/issues/16690))
-* Bump sentry-sdk from 1.32.0 to 1.35.0. ([\#16666](https://github.com/matrix-org/synapse/issues/16666))
-* Bump serde from 1.0.192 to 1.0.193. ([\#16693](https://github.com/matrix-org/synapse/issues/16693))
-* Bump sphinx-autodoc2 from 0.4.2 to 0.5.0. ([\#16723](https://github.com/matrix-org/synapse/issues/16723))
-* Bump types-jsonschema from 4.19.0.4 to 4.20.0.0. ([\#16724](https://github.com/matrix-org/synapse/issues/16724))
-* Bump types-pillow from 10.1.0.0 to 10.1.0.2. ([\#16664](https://github.com/matrix-org/synapse/issues/16664))
-* Bump types-psycopg2 from 2.9.21.15 to 2.9.21.16. ([\#16665](https://github.com/matrix-org/synapse/issues/16665))
-* Bump types-setuptools from 68.2.0.0 to 68.2.0.2. ([\#16688](https://github.com/matrix-org/synapse/issues/16688))
-
-# Synapse 1.97.0 (2023-11-28)
-
-Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for
-proprietary dual licensing). You can read more about this here:
+* Bump anyhow from 1.0.95 to 1.0.96. ([\#18187](https://github.com/element-hq/synapse/issues/18187))
+* Bump authlib from 1.4.0 to 1.4.1. ([\#18190](https://github.com/element-hq/synapse/issues/18190))
+* Bump click from 8.1.7 to 8.1.8. ([\#18189](https://github.com/element-hq/synapse/issues/18189))
+* Bump log from 0.4.25 to 0.4.26. ([\#18184](https://github.com/element-hq/synapse/issues/18184))
+* Bump pyo3-log from 0.12.0 to 0.12.1. ([\#18046](https://github.com/element-hq/synapse/issues/18046))
+* Bump serde from 1.0.217 to 1.0.218. ([\#18183](https://github.com/element-hq/synapse/issues/18183))
+* Bump serde_json from 1.0.138 to 1.0.139. ([\#18186](https://github.com/element-hq/synapse/issues/18186))
+* Bump sigstore/cosign-installer from 3.8.0 to 3.8.1. ([\#18185](https://github.com/element-hq/synapse/issues/18185))
+* Bump types-psycopg2 from 2.9.21.20241019 to 2.9.21.20250121. ([\#18188](https://github.com/element-hq/synapse/issues/18188))
-
- - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/
- - https://element.io/blog/element-to-adopt-agplv3/
-
-The Matrix.org Foundation copy of the project will be archived. Any changes needed
-by server administrators will be communicated via our usual announcements channels,
-but we are striving to make this as seamless as possible.
+# Synapse 1.125.0 (2025-02-25)
+No significant changes since 1.125.0rc1.
-
-No significant changes since 1.97.0rc1.
-
-# Synapse 1.97.0rc1 (2023-11-21)
+# Synapse 1.125.0rc1 (2025-02-18)

### Features

-- Add support for asynchronous uploads as defined by [MSC2246](https://github.com/matrix-org/matrix-spec-proposals/pull/2246). Contributed by @sumnerevans at @beeper. ([\#15503](https://github.com/matrix-org/synapse/issues/15503))
-- Improve the performance of some operations in multi-worker deployments. ([\#16613](https://github.com/matrix-org/synapse/issues/16613), [\#16616](https://github.com/matrix-org/synapse/issues/16616))
+- Add the ability to use multiple values in the SSO feature `attribute_requirements`. ([\#17949](https://github.com/element-hq/synapse/issues/17949))
+- Add experimental config options `admin_token_path` and `client_secret_path` for [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861). ([\#18004](https://github.com/element-hq/synapse/issues/18004))
+- Add `get_current_time_msec()` method to the [module API](https://matrix-org.github.io/synapse/latest/modules/writing_a_module.html) for sound time comparisons with Synapse. ([\#18144](https://github.com/element-hq/synapse/issues/18144))
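For module authors, a rough sketch of how the new method might be used. Everything here other than `get_current_time_msec()` itself — the module scaffolding, names, and the expiry logic — is hypothetical:

```python
class ExampleModule:
    def __init__(self, config: dict, api) -> None:
        # `api` is the ModuleApi object Synapse passes to modules.
        self._api = api

    def is_expired(self, expiry_ts_ms: int) -> bool:
        # Compare against Synapse's own clock rather than time.time(), so
        # the result stays consistent with the homeserver's notion of "now"
        # (including under tests that use a fake clock).
        return self._api.get_current_time_msec() >= expiry_ts_ms
```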
### Bugfixes

-- Fix a long-standing bug where some queries updated the same row twice. Introduced in Synapse 1.57.0. ([\#16609](https://github.com/matrix-org/synapse/issues/16609))
-- Fix a long-standing bug where Synapse would not unbind third-party identifiers for Application Service users when deactivated and would not emit a compliant response. ([\#16617](https://github.com/matrix-org/synapse/issues/16617))
-- Fix sending out of order `POSITION` over replication, causing additional database load. ([\#16639](https://github.com/matrix-org/synapse/issues/16639))
-
-### Improved Documentation
-
-- Note that the option [`outbound_federation_restricted_to`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#outbound_federation_restricted_to) was added in Synapse 1.89.0, and fix a nearby formatting error. ([\#16628](https://github.com/matrix-org/synapse/issues/16628))
-- Update parameter information for the `/timestamp_to_event` admin API. ([\#16631](https://github.com/matrix-org/synapse/issues/16631))
-- Provide an example for a common encrypted media response from the admin user media API and mention possible null values. ([\#16654](https://github.com/matrix-org/synapse/issues/16654))
-
-### Internal Changes
-
-- Remove whole table locks on push rule modifications. Contributed by Nick @ Beeper (@fizzadar). ([\#16051](https://github.com/matrix-org/synapse/issues/16051))
-- Support reactor tick timings on more types of event loops. ([\#16532](https://github.com/matrix-org/synapse/issues/16532))
-- Improve type hints. ([\#16564](https://github.com/matrix-org/synapse/issues/16564), [\#16611](https://github.com/matrix-org/synapse/issues/16611), [\#16612](https://github.com/matrix-org/synapse/issues/16612))
-- Avoid executing no-op queries. ([\#16583](https://github.com/matrix-org/synapse/issues/16583))
-- Simplify persistence code to be per-room. ([\#16584](https://github.com/matrix-org/synapse/issues/16584))
-- Use standard SQL helpers in persistence code. ([\#16585](https://github.com/matrix-org/synapse/issues/16585))
-- Avoid updating the stream cache unnecessarily. ([\#16586](https://github.com/matrix-org/synapse/issues/16586))
-- Improve performance when using opentracing. ([\#16589](https://github.com/matrix-org/synapse/issues/16589))
-- Run push rule evaluator setup in parallel. ([\#16590](https://github.com/matrix-org/synapse/issues/16590))
-- Improve tests of the SQL generator. ([\#16596](https://github.com/matrix-org/synapse/issues/16596))
-- Use more generic database methods. ([\#16615](https://github.com/matrix-org/synapse/issues/16615))
-- Use `dbname` instead of the deprecated `database` connection parameter for psycopg2. ([\#16618](https://github.com/matrix-org/synapse/issues/16618))
-- Add an internal [Admin API endpoint](https://matrix-org.github.io/synapse/v1.97/usage/configuration/config_documentation.html#allow-replacing-master-cross-signing-key-without-user-interactive-auth) to temporarily grant the ability to update an existing cross-signing key without UIA. ([\#16634](https://github.com/matrix-org/synapse/issues/16634))
-- Improve references to GitHub issues. ([\#16637](https://github.com/matrix-org/synapse/issues/16637), [\#16638](https://github.com/matrix-org/synapse/issues/16638))
-- More efficiently handle no-op `POSITION` over replication. ([\#16640](https://github.com/matrix-org/synapse/issues/16640), [\#16655](https://github.com/matrix-org/synapse/issues/16655))
-- Speed up deleting of device messages when deleting a device. ([\#16643](https://github.com/matrix-org/synapse/issues/16643))
-- Speed up persisting large numbers of outliers. ([\#16649](https://github.com/matrix-org/synapse/issues/16649))
-- Reduce max concurrency of background tasks, reducing potential max DB load. ([\#16656](https://github.com/matrix-org/synapse/issues/16656), [\#16660](https://github.com/matrix-org/synapse/issues/16660))
-- Speed up purge room by adding an index to `event_push_summary`. ([\#16657](https://github.com/matrix-org/synapse/issues/16657))
-
-### Updates to locked dependencies
-
-* Bump prometheus-client from 0.17.1 to 0.18.0. ([\#16626](https://github.com/matrix-org/synapse/issues/16626))
-* Bump pyicu from 2.11 to 2.12. ([\#16603](https://github.com/matrix-org/synapse/issues/16603))
-* Bump requests-toolbelt from 0.10.1 to 1.0.0. ([\#16659](https://github.com/matrix-org/synapse/issues/16659))
-* Bump ruff from 0.0.292 to 0.1.4. ([\#16600](https://github.com/matrix-org/synapse/issues/16600))
-* Bump serde from 1.0.190 to 1.0.192. ([\#16627](https://github.com/matrix-org/synapse/issues/16627))
-* Bump serde_json from 1.0.107 to 1.0.108. ([\#16604](https://github.com/matrix-org/synapse/issues/16604))
-* Bump setuptools-rust from 1.8.0 to 1.8.1. ([\#16601](https://github.com/matrix-org/synapse/issues/16601))
-* Bump towncrier from 23.6.0 to 23.11.0. ([\#16622](https://github.com/matrix-org/synapse/issues/16622))
-* Bump treq from 22.2.0 to 23.11.0. ([\#16623](https://github.com/matrix-org/synapse/issues/16623))
-* Bump twisted from 23.8.0 to 23.10.0. ([\#16588](https://github.com/matrix-org/synapse/issues/16588))
-* Bump types-bleach from 6.1.0.0 to 6.1.0.1. ([\#16624](https://github.com/matrix-org/synapse/issues/16624))
-* Bump types-jsonschema from 4.19.0.3 to 4.19.0.4. ([\#16599](https://github.com/matrix-org/synapse/issues/16599))
-* Bump types-pyopenssl from 23.2.0.2 to 23.3.0.0. ([\#16625](https://github.com/matrix-org/synapse/issues/16625))
-* Bump types-pyyaml from 6.0.12.11 to 6.0.12.12. ([\#16602](https://github.com/matrix-org/synapse/issues/16602))
-
-# Synapse 1.96.1 (2023-11-17)
-
-Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for
-proprietary dual licensing). You can read more about this here:
-
-* https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/
-* https://element.io/blog/element-to-adopt-agplv3/
-
-The Matrix.org Foundation copy of the project will be archived. Any changes needed
-by server administrators will be communicated via our usual
-[announcements channels](https://matrix.to/#/#homeowners:matrix.org), but we are
-striving to make this as seamless as possible.
-
-This minor release was needed only because of CI-related trouble on [v1.96.0](https://github.com/matrix-org/synapse/releases/tag/v1.96.0), which was never released.
-
-### Internal Changes
-
-- Fix building of wheels in CI. ([\#16653](https://github.com/matrix-org/synapse/issues/16653))
-
-# Synapse 1.96.0 (2023-11-16)
-
-### Bugfixes
+- Update the response when a client attempts to add an invalid email address to the user's account from a 500, to a 400 with error text. ([\#18125](https://github.com/element-hq/synapse/issues/18125))
+- Fix user directory search when using a legacy module with a `check_username_for_spam` callback. Broke in v1.122.0. ([\#18135](https://github.com/element-hq/synapse/issues/18135))
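For context on the fix above: `check_username_for_spam` is one of the spam-checker callbacks that modules can register to filter user directory search results, and legacy modules are shimmed onto the same hook. A minimal sketch of the modern registration, assuming the profile is a dict-like object with `user_id`/`display_name`/`avatar_url` fields; treat the spam checker callbacks documentation, not this sketch, as the authoritative signature:

```python
class ExampleSpamChecker:
    def __init__(self, config: dict, api) -> None:
        # Register only the user-directory hook; other callbacks omitted.
        api.register_spam_checker_callbacks(
            check_username_for_spam=self.check_username_for_spam,
        )

    async def check_username_for_spam(self, user_profile) -> bool:
        # Returning True hides this user from user directory search results.
        display_name = user_profile.get("display_name") or ""
        return "spam" in display_name.lower()
```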
-- Fix "'int' object is not iterable" error in `set_device_id_for_pushers` background update introduced in Synapse 1.95.0. ([\#16594](https://github.com/matrix-org/synapse/issues/16594))
-
-# Synapse 1.96.0rc1 (2023-10-31)
-
-### Features
-
-- Add experimental support to allow multiple workers to write to receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432))
-- Add a new module API for controlling presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544))
-- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549))
-- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570))
-
-### Bugfixes
-
-- Fixed a bug in the example Grafana dashboard that prevents it from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471))
-- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580))
-- Fix long-standing bug where `/sync` incorrectly did not mark a room as `limited` in sync requests when there were missing remote events. ([\#16485](https://github.com/matrix-org/synapse/issues/16485))
-- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string. ([\#16504](https://github.com/matrix-org/synapse/issues/16504))
-- Force TLS certificate verification in user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530))
-- Fix long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540))
-- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work has been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558))
-- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559))
-
-### Improved Documentation
-
-- Improve documentation of presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529))
-- Add a sentence to the [opentracing docs](https://matrix-org.github.io/synapse/latest/opentracing.html) on how you can have Jaeger in a different place than Synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531))
-- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541))
-- Pin the recommended poetry version in [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16550](https://github.com/matrix-org/synapse/issues/16550))
-- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569))
-
-### Internal Changes
-
-- Improve performance of delete device messages query, cf issue [16479](https://github.com/matrix-org/synapse/issues/16479). ([\#16492](https://github.com/matrix-org/synapse/issues/16492))
-- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505))
-- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510))
-- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511))
-- Run trial & integration tests in continuous integration when `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512))
-- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515))
-- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520))
-- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521))
-- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551))
-- Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528))
-- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555))
-- Stop porting a table in port db that we're going to nuke and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563))
-- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567))
-- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574))
-
-### Updates to locked dependencies
-
-* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575))
-* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538))
-* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572))
-* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534))
-* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576))
-* Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535))
-* Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539))
-* Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577))
-* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574))
-* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536))
-* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573))
-* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537))
-* Bump urllib3 from 1.26.17 to 1.26.18. ([\#16516](https://github.com/matrix-org/synapse/issues/16516))
-
-# Synapse 1.95.1 (2023-10-31)
-
-## Security advisory
-
-The following issue is fixed in 1.95.1.
-
-- [GHSA-mp92-3jfm-3575](https://github.com/matrix-org/synapse/security/advisories/GHSA-mp92-3jfm-3575) / [CVE-2023-43796](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-43796) — Moderate Severity
-
-  Cached device information of remote users can be queried from Synapse. This can be used to enumerate the remote users known to a homeserver.
-
-See the advisory for more details. If you have any questions, email security@matrix.org.
-
-
-# Synapse 1.95.0 (2023-10-24)
-
-### Internal Changes
-
-- Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). ([\#16524](https://github.com/matrix-org/synapse/issues/16524))
-
-
-# Synapse 1.95.0rc1 (2023-10-17)
-
-### Bugfixes
-
-- Remove legacy unspecced `knock_state_events` field returned in some responses. ([\#16403](https://github.com/matrix-org/synapse/issues/16403))
-- Fix a bug introduced in Synapse 1.81.0 where an `AttributeError` would be raised when `_matrix/client/v3/account/whoami` is called over a unix socket. Contributed by @Sir-Photch. ([\#16404](https://github.com/matrix-org/synapse/issues/16404))
-- Properly return inline media when content types have parameters. ([\#16440](https://github.com/matrix-org/synapse/issues/16440))
-- Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. ([\#16455](https://github.com/matrix-org/synapse/issues/16455))
-- Improve the performance of purging rooms, particularly encrypted rooms. ([\#16457](https://github.com/matrix-org/synapse/issues/16457))
-- Fix a bug introduced in Synapse 1.59.0 where servers could be incorrectly marked as available after an error response was received. ([\#16506](https://github.com/matrix-org/synapse/issues/16506))
-
-### Improved Documentation
-
-- Document internal background update mechanism. ([\#16420](https://github.com/matrix-org/synapse/issues/16420))
-- Fix a typo in the SQL in the [useful SQL for admins document](https://matrix-org.github.io/synapse/latest/usage/administration/useful_sql_for_admins.html). ([\#16477](https://github.com/matrix-org/synapse/issues/16477))
-
-### Internal Changes
-
-- Bump pyo3 from 0.17.1 to 0.19.2. ([\#16162](https://github.com/matrix-org/synapse/issues/16162))
-- Update registration of media repository URLs. ([\#16419](https://github.com/matrix-org/synapse/issues/16419))
-- Improve type hints. ([\#16421](https://github.com/matrix-org/synapse/issues/16421), [\#16468](https://github.com/matrix-org/synapse/issues/16468), [\#16469](https://github.com/matrix-org/synapse/issues/16469), [\#16507](https://github.com/matrix-org/synapse/issues/16507))
-- Refactor some code to simplify and better type receipts stream adjacent code. ([\#16426](https://github.com/matrix-org/synapse/issues/16426))
-- Factor out `MultiWriter` token from `RoomStreamToken`. ([\#16427](https://github.com/matrix-org/synapse/issues/16427))
-- Improve code comments. ([\#16428](https://github.com/matrix-org/synapse/issues/16428))
-- Reduce memory allocations. ([\#16429](https://github.com/matrix-org/synapse/issues/16429), [\#16431](https://github.com/matrix-org/synapse/issues/16431), [\#16433](https://github.com/matrix-org/synapse/issues/16433), [\#16434](https://github.com/matrix-org/synapse/issues/16434), [\#16438](https://github.com/matrix-org/synapse/issues/16438), [\#16444](https://github.com/matrix-org/synapse/issues/16444))
-- Remove unused method. ([\#16435](https://github.com/matrix-org/synapse/issues/16435))
-- Improve rate limiting logic. ([\#16441](https://github.com/matrix-org/synapse/issues/16441))
-- Do not block running of CI behind the check for sign-off on PRs. ([\#16454](https://github.com/matrix-org/synapse/issues/16454))
-- Update the release script to remind the releaser to check for special release notes. ([\#16461](https://github.com/matrix-org/synapse/issues/16461))
-- Update complement.sh to match new public API shape. ([\#16466](https://github.com/matrix-org/synapse/issues/16466))
-- Clean up logging on event persister endpoints. ([\#16488](https://github.com/matrix-org/synapse/issues/16488))
-- Remove useless async job to delete device messages on sync, since we only deliver (and hence delete) up to 100 device messages at a time. ([\#16491](https://github.com/matrix-org/synapse/issues/16491))
-
-### Updates to locked dependencies
-
-* Bump bleach from 6.0.0 to 6.1.0. ([\#16451](https://github.com/matrix-org/synapse/issues/16451))
-* Bump jsonschema from 4.19.0 to 4.19.1. ([\#16500](https://github.com/matrix-org/synapse/issues/16500))
-* Bump netaddr from 0.8.0 to 0.9.0. ([\#16453](https://github.com/matrix-org/synapse/issues/16453))
-* Bump packaging from 23.1 to 23.2. ([\#16497](https://github.com/matrix-org/synapse/issues/16497))
-* Bump pillow from 10.0.1 to 10.1.0. ([\#16498](https://github.com/matrix-org/synapse/issues/16498))
-* Bump psycopg2 from 2.9.8 to 2.9.9. ([\#16452](https://github.com/matrix-org/synapse/issues/16452))
-* Bump pyo3-log from 0.8.3 to 0.8.4. ([\#16495](https://github.com/matrix-org/synapse/issues/16495))
-* Bump ruff from 0.0.290 to 0.0.292. ([\#16449](https://github.com/matrix-org/synapse/issues/16449))
-* Bump sentry-sdk from 1.31.0 to 1.32.0. ([\#16496](https://github.com/matrix-org/synapse/issues/16496))
-* Bump serde from 1.0.188 to 1.0.189. ([\#16494](https://github.com/matrix-org/synapse/issues/16494))
-* Bump types-bleach from 6.0.0.4 to 6.1.0.0. ([\#16450](https://github.com/matrix-org/synapse/issues/16450))
-* Bump types-jsonschema from 4.17.0.10 to 4.19.0.3. ([\#16499](https://github.com/matrix-org/synapse/issues/16499))
-
-# Synapse 1.94.0 (2023-10-10)
-
-No significant changes since 1.94.0rc1.
-However, please take note of the security advisory that follows.
-
-## Security advisory
-
-The following issue is fixed in 1.94.0 (and RC).
-
-- [GHSA-5chr-wjw5-3gq4](https://github.com/matrix-org/synapse/security/advisories/GHSA-5chr-wjw5-3gq4) / [CVE-2023-45129](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-45129) — Moderate Severity
-
-  A malicious server ACL event can impact performance temporarily or permanently leading to a persistent denial of service.
-
-  Homeservers running on a closed federation (which presumably do not need to use server ACLs) are not affected.
-
-See the advisory for more details. If you have any questions, email security@matrix.org.
-
-
-# Synapse 1.94.0rc1 (2023-10-03)
-
-### Features
-
-- Render plain, CSS, CSV, JSON and common image formats in the browser (inline) when requested through the /download endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988))
-- Add experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361))
-- Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385))
-- Minor performance improvement by caching server ACL checking. ([\#16360](https://github.com/matrix-org/synapse/issues/16360))
-
-### Improved Documentation
-
-- Add developer documentation concerning gradual schema migrations with column alterations. ([\#15691](https://github.com/matrix-org/synapse/issues/15691))
-- Improve documentation of the user directory search algorithm. ([\#16320](https://github.com/matrix-org/synapse/issues/16320))
-- Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. ([\#16355](https://github.com/matrix-org/synapse/issues/16355))
-- Update documentation around message retention policies. ([\#16382](https://github.com/matrix-org/synapse/issues/16382))
-- Add note to `federation_domain_whitelist` config option to clarify its usage. ([\#16416](https://github.com/matrix-org/synapse/issues/16416))
-- Improve legacy release notes. ([\#16418](https://github.com/matrix-org/synapse/issues/16418))
-
-### Deprecations and Removals
-
-- Remove Python version from `/_synapse/admin/v1/server_version`. ([\#16380](https://github.com/matrix-org/synapse/issues/16380))
-
-### Internal Changes
-
-- Avoid running CI steps when the files they check have not been changed. ([\#14745](https://github.com/matrix-org/synapse/issues/14745), [\#16387](https://github.com/matrix-org/synapse/issues/16387))
-- Improve type hints. ([\#14911](https://github.com/matrix-org/synapse/issues/14911), [\#16350](https://github.com/matrix-org/synapse/issues/16350), [\#16356](https://github.com/matrix-org/synapse/issues/16356), [\#16395](https://github.com/matrix-org/synapse/issues/16395))
-- Added support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). ([\#16332](https://github.com/matrix-org/synapse/issues/16332))
-- Get CI to check PRs have been signed-off. ([\#16348](https://github.com/matrix-org/synapse/issues/16348))
-- Add missing licence header. ([\#16359](https://github.com/matrix-org/synapse/issues/16359))
-- Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381))
-- Improve comments in `StateGroupBackgroundUpdateStore`. ([\#16383](https://github.com/matrix-org/synapse/issues/16383))
-- Update maturin configuration. ([\#16394](https://github.com/matrix-org/synapse/issues/16394))
-- Downgrade replication stream timeout error log lines to warning. ([\#16401](https://github.com/matrix-org/synapse/issues/16401))
-
-### Updates to locked dependencies
-
-* Bump actions/checkout from 3 to 4. ([\#16250](https://github.com/matrix-org/synapse/issues/16250))
-* Bump cryptography from 41.0.3 to 41.0.4. ([\#16362](https://github.com/matrix-org/synapse/issues/16362))
-* Bump dawidd6/action-download-artifact from 2.27.0 to 2.28.0. ([\#16374](https://github.com/matrix-org/synapse/issues/16374))
-* Bump docker/setup-buildx-action from 2 to 3. ([\#16375](https://github.com/matrix-org/synapse/issues/16375))
-* Bump gitpython from 3.1.35 to 3.1.37. ([\#16376](https://github.com/matrix-org/synapse/issues/16376))
-* Bump msgpack from 1.0.5 to 1.0.6. ([\#16377](https://github.com/matrix-org/synapse/issues/16377))
-* Bump msgpack from 1.0.6 to 1.0.7. ([\#16412](https://github.com/matrix-org/synapse/issues/16412))
-* Bump phonenumbers from 8.13.19 to 8.13.22. ([\#16413](https://github.com/matrix-org/synapse/issues/16413))
-* Bump psycopg2 from 2.9.7 to 2.9.8. ([\#16409](https://github.com/matrix-org/synapse/issues/16409))
-* Bump pydantic from 2.3.0 to 2.4.2. ([\#16410](https://github.com/matrix-org/synapse/issues/16410))
-* Bump regex from 1.9.5 to 1.9.6. ([\#16408](https://github.com/matrix-org/synapse/issues/16408))
-* Bump sentry-sdk from 1.30.0 to 1.31.0. ([\#16378](https://github.com/matrix-org/synapse/issues/16378))
-* Bump types-netaddr from 0.8.0.9 to 0.9.0.1. ([\#16411](https://github.com/matrix-org/synapse/issues/16411))
-* Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381))
-* Bump urllib3 from 1.26.15 to 1.26.17. ([\#16422](https://github.com/matrix-org/synapse/issues/16422))
-
-# Synapse 1.93.0 (2023-09-26)
-
-No significant changes since 1.93.0rc1.
-
-## Security advisory
-
-The following issues are fixed in 1.93.0 (and RCs).
-
-- [GHSA-4f74-84v3-j9q5](https://github.com/matrix-org/synapse/security/advisories/GHSA-4f74-84v3-j9q5) / [CVE-2023-41335](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-41335) — Low Severity
-
-  Temporary storage of plaintext passwords during password changes.
-
-- [GHSA-7565-cq32-vx2x](https://github.com/matrix-org/synapse/security/advisories/GHSA-7565-cq32-vx2x) / [CVE-2023-42453](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-42453) — Low Severity
-
-  Improper validation of receipts allows forged read receipts.
-
-See the advisories for more details. If you have any questions, email security@matrix.org.
-
-
-# Synapse 1.93.0rc1 (2023-09-19)
-
-### Features
-
-- Add automatic purge after all users have forgotten a room. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
-- Restore room purge/shutdown after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
-- Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
-- Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
-- Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
-- Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
-- Allow the `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
-- Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
-- Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))
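A short illustration of the size-suffix feature above, applied to byte-count options; the values are arbitrary and the option names are shown only as familiar examples of byte-count settings, so check the configuration documentation before copying:

```yaml
# homeserver.yaml — illustrative values only.
max_upload_size: 1G    # 1 GiB, using the GiB suffix from the entry above
max_avatar_size: 10M   # smaller suffixes were already supported
```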
-
-### Bugfixes
+### Updates to the Docker image
-
-- Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174))
-- Fix a long-standing bug where appservices using [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) to receive `to_device` messages would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
-- Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. ([\#16252](https://github.com/matrix-org/synapse/issues/16252))
-- Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257))
-- Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272))
-- Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288))
-- Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327))
-- Use standard name for UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329))
-- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
+- Add `SYNAPSE_HTTP_PROXY`/`SYNAPSE_HTTPS_PROXY`/`SYNAPSE_NO_PROXY` environment variables to pass through specifically to the Synapse process (instead of needing to apply [`http_proxy`/`https_proxy`/`no_proxy`](https://element-hq.github.io/synapse/latest/setup/forward_proxy.html) globally). ([\#18158](https://github.com/element-hq/synapse/issues/18158))
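A sketch of wiring the new pass-through variables in a Compose file, so the proxy settings reach only the Synapse process rather than everything else that happens to read the conventional lowercase variables; the hostnames and ports are invented for the example:

```yaml
# docker-compose.yml fragment — illustrative only.
services:
  synapse:
    image: matrixdotorg/synapse:latest
    environment:
      SYNAPSE_HTTP_PROXY: "http://proxy.example.internal:3128"
      SYNAPSE_HTTPS_PROXY: "http://proxy.example.internal:3128"
      SYNAPSE_NO_PROXY: "localhost,127.0.0.1"
```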
### Improved Documentation

-- Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282))
-- Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304))
-- Use string for `federation_client_minimum_tls_version` documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))
+- Add Oracle Linux 8 and 9 installation instructions. ([\#17436](https://github.com/element-hq/synapse/issues/17436))
+- Document missing server config options (`daemonize`, `print_pidfile`, `user_agent_suffix`, `use_frozen_dicts`, `manhole`). ([\#18122](https://github.com/element-hq/synapse/issues/18122))
+- Document consequences of replacing secrets. ([\#18138](https://github.com/element-hq/synapse/issues/18138))
+- Make `burst_count` field an integer in `rc_presence` config documentation example. ([\#18159](https://github.com/element-hq/synapse/issues/18159))

### Internal Changes

-- Allow modules to delete rooms. ([\#15997](https://github.com/matrix-org/synapse/issues/15997))
-- Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. ([\#16090](https://github.com/matrix-org/synapse/issues/16090), [\#16263](https://github.com/matrix-org/synapse/issues/16263))
-- Fix type checking when using the new version of Twisted. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
-- Delete device messages asynchronously and in staged batches using the task scheduler. ([\#16240](https://github.com/matrix-org/synapse/issues/16240), [\#16311](https://github.com/matrix-org/synapse/issues/16311), [\#16312](https://github.com/matrix-org/synapse/issues/16312), [\#16313](https://github.com/matrix-org/synapse/issues/16313))
-- Bump minimum supported Rust version to 1.61.0. ([\#16248](https://github.com/matrix-org/synapse/issues/16248))
-- Update rust to version 1.71.1 in the nix development environment. ([\#16260](https://github.com/matrix-org/synapse/issues/16260))
-- Simplify server key storage. ([\#16261](https://github.com/matrix-org/synapse/issues/16261))
-- Reduce CPU overhead of change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264))
-- Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273))
-- Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326))
-- Raise `setuptools_rust` version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
-- Fix using the new task scheduler causing lots of CPU to be used. ([\#16278](https://github.com/matrix-org/synapse/issues/16278))
-- Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
-- Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
-- Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
-- Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
-- Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
-- Remove a reference cycle in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
-- Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
-- Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
-- Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))
-- Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349))
-- Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351))
-
-### Updates to locked dependencies
-
-* Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
-* Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295))
-* Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336))
-* Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339))
-* Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337))
-* Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338))
-* Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340))
-* Bump gitpython from 3.1.32 to 3.1.35. ([\#16267](https://github.com/matrix-org/synapse/issues/16267), [\#16279](https://github.com/matrix-org/synapse/issues/16279))
-* Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291))
-* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
-* Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233))
-* Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342))
-* Bump serde_json from 1.0.105 to 1.0.107. ([\#16296](https://github.com/matrix-org/synapse/issues/16296), [\#16345](https://github.com/matrix-org/synapse/issues/16345))
-* Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
-* Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293))
-* Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292))
-* Bump typing-extensions from 4.7.1 to 4.8.0. ([\#16341](https://github.com/matrix-org/synapse/issues/16341))
-
-# Synapse 1.92.3 (2023-09-18)
+- Overload `DatabasePool.simple_select_one_txn` to return non-`None` when the `allow_none` parameter is `False`. ([\#17616](https://github.com/element-hq/synapse/issues/17616))
+- Python 3.8 EOL: compile native extensions with the 3.9 ABI and use typing hints from the standard library. ([\#17967](https://github.com/element-hq/synapse/issues/17967))
+- Add log message when worker lock timeouts get large. ([\#18124](https://github.com/element-hq/synapse/issues/18124))
+- Make it explicit that you can buy an AGPL-alternative commercial license from Element. ([\#18134](https://github.com/element-hq/synapse/issues/18134))
+- Fix the 'Fix linting' GitHub Actions workflow. ([\#18136](https://github.com/element-hq/synapse/issues/18136))
+- Do not log at the exception-level when clients provide an empty `since` token to the `/sync` API. ([\#18139](https://github.com/element-hq/synapse/issues/18139))
+- Reduce database load of user search when using large search terms. ([\#18172](https://github.com/element-hq/synapse/issues/18172))
-
-This is again a security update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863).
-It turns out that libwebp is bundled statically in Pillow wheels so we need to update this dependency instead of
-the libwebp package at the OS level.
-
-Unlike what was advertised in the 1.92.2 changelog, this release also impacts PyPI wheels and Debian packages from matrix.org.
-
-We encourage admins to upgrade as soon as possible.
-
-### Internal Changes
-
-- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in the wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347))

### Updates to locked dependencies

-* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
+* Bump bcrypt from 4.2.0 to 4.2.1. ([\#18127](https://github.com/element-hq/synapse/issues/18127))
+* Bump bytes from 1.9.0 to 1.10.0. ([\#18149](https://github.com/element-hq/synapse/issues/18149))
+* Bump gitpython from 3.1.43 to 3.1.44. ([\#18128](https://github.com/element-hq/synapse/issues/18128))
+* Bump hiredis from 3.0.0 to 3.1.0. ([\#18169](https://github.com/element-hq/synapse/issues/18169))
+* Bump serde_json from 1.0.137 to 1.0.138. ([\#18129](https://github.com/element-hq/synapse/issues/18129))
+* Bump service-identity from 24.1.0 to 24.2.0. ([\#18171](https://github.com/element-hq/synapse/issues/18171))
+* Bump sigstore/cosign-installer from 3.7.0 to 3.8.0. ([\#18147](https://github.com/element-hq/synapse/issues/18147))
+* Bump twine from 6.0.1 to 6.1.0. ([\#18170](https://github.com/element-hq/synapse/issues/18170))
+* Bump types-pyyaml from 6.0.12.20240917 to 6.0.12.20241230. ([\#18097](https://github.com/element-hq/synapse/issues/18097))
+* Bump ulid from 1.1.4 to 1.2.0. ([\#18148](https://github.com/element-hq/synapse/issues/18148))
-
-# Synapse 1.92.2 (2023-09-15)
+# Synapse 1.124.0 (2025-02-11)
-
-This is a Docker-only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libwebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed). We encourage admins to upgrade as soon as possible.
+No significant changes since 1.124.0rc3.
-
-### Updates to the Docker image
-
-- Update docker image to use Debian bookworm as the base. ([\#16324](https://github.com/matrix-org/synapse/issues/16324))
-
-
-# Synapse 1.92.1 (2023-09-12)
-
-This minor release was needed only because of CI-related trouble on [v1.92.0](https://github.com/matrix-org/synapse/releases/tag/v1.92.0), which was never released.
-
-### Internal Changes
-
-- Stop building Ubuntu Kinetic since it is EOL and repos seem to be dead.
-
-# Synapse 1.92.0 (2023-09-12)
+# Synapse 1.124.0rc3 (2025-02-07)
-
-This release includes the same [bugfix](https://github.com/matrix-org/synapse/issues/16258) as Synapse 1.91.2.
-
-This version was never released following a CI build failure, cf [v1.92.1 changelog](https://github.com/matrix-org/synapse/releases/tag/v1.92.1).

### Bugfixes

-- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
+- Fix regression in performance of sending events due to superfluous reads and locks. Introduced in v1.124.0rc1. ([\#18141](https://github.com/element-hq/synapse/issues/18141))
-
-### Internal Changes
-
-- Fix incorrect docstring for `Ratelimiter`. ([\#16255](https://github.com/matrix-org/synapse/issues/16255))
-- Update the release script to work on macOS. ([\#16266](https://github.com/matrix-org/synapse/issues/16266))
-
-# Synapse 1.91.2 (2023-09-06)
+# Synapse 1.124.0rc2 (2025-02-05)

### Bugfixes

-- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
+- Fix regression where persisting events in some rooms could fail after a previous unclean shutdown. Introduced in v1.124.0rc1. ([\#18137](https://github.com/element-hq/synapse/issues/18137))
-
-# Synapse 1.92.0rc1 (2023-09-05)
-
-### Features
-
-- Add configuration setting for CAS protocol version. Contributed by Aurélien Grimpard. ([\#15816](https://github.com/matrix-org/synapse/issues/15816))
-- Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). ([\#16113](https://github.com/matrix-org/synapse/issues/16113))
-- Experimental support for [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041): return a `Retry-After` header with `M_LIMIT_EXCEEDED` error responses. ([\#16136](https://github.com/matrix-org/synapse/issues/16136))
-- Add `last_seen_ts` to the [admin users API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#16218](https://github.com/matrix-org/synapse/issues/16218))
-- Improve resource usage when sending data to a large number of remote hosts that are marked as "down". ([\#16223](https://github.com/matrix-org/synapse/issues/16223))
+# Synapse 1.124.0rc1 (2025-02-04)

### Bugfixes

-- Fix IPv6-related bugs on SMTP settings, adding groundwork to fix similar issues. Contributed by @evilham and @telmich (ungleich.ch). ([\#16155](https://github.com/matrix-org/synapse/issues/16155))
-- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. ([\#16185](https://github.com/matrix-org/synapse/issues/16185))
-- Fix inaccurate error message while attempting to ban or unban a user with the same or higher PL by splitting the conditional statements. Contributed by @leviosacz. ([\#16205](https://github.com/matrix-org/synapse/issues/16205))
-- Fix a rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. ([\#16210](https://github.com/matrix-org/synapse/issues/16210))
-- Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. ([\#16211](https://github.com/matrix-org/synapse/issues/16211))
-- Fix a long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. ([\#16221](https://github.com/matrix-org/synapse/issues/16221))
-
-### Improved Documentation
-
-- Update links to the [matrix.org blog](https://matrix.org/blog/). ([\#16008](https://github.com/matrix-org/synapse/issues/16008))
-- Document which [admin APIs](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. ([\#16168](https://github.com/matrix-org/synapse/issues/16168))
-- Document [`exclude_rooms_from_sync`](https://matrix-org.github.io/synapse/v1.92/usage/configuration/config_documentation.html#exclude_rooms_from_sync) configuration option. ([\#16178](https://github.com/matrix-org/synapse/issues/16178))
+- Add rate limit `rc_presence.per_user`. This prevents load from excessive presence updates sent by clients via the sync API. Also rate limit `/_matrix/client/v3/presence` as per the spec. Contributed by @rda0. ([\#18000](https://github.com/element-hq/synapse/issues/18000))
+- Deactivated users will no longer automatically accept an invite when `auto_accept_invites` is enabled. ([\#18073](https://github.com/element-hq/synapse/issues/18073))
+- Fix join being denied after being invited over federation. Also fixes other out-of-band membership transitions. ([\#18075](https://github.com/element-hq/synapse/issues/18075))
+- Update the contributed `docker-compose.yml` file to PostgreSQL v15, as v12 is no longer supported by Synapse.
+  Contributed by @maxkratz. ([\#18089](https://github.com/element-hq/synapse/issues/18089))
+- Fix rare edge case where state groups could be deleted while we are persisting new events that reference them. ([\#18107](https://github.com/element-hq/synapse/issues/18107), [\#18130](https://github.com/element-hq/synapse/issues/18130), [\#18131](https://github.com/element-hq/synapse/issues/18131))
+- Raise an error if someone is using an incorrect suffix in a config duration string. ([\#18112](https://github.com/element-hq/synapse/issues/18112))
+- Fix a bug where the [Delete Room Admin API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#version-2-new-version) would fail if the `block` parameter was set to `true` and a worker other than the main process was configured to handle background tasks. ([\#18119](https://github.com/element-hq/synapse/issues/18119))
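The new presence limit follows the same shape as Synapse's other `rc_*` ratelimit settings, i.e. a `per_second` rate plus an integer `burst_count`. An illustrative fragment with arbitrary values:

```yaml
# homeserver.yaml — illustrative values only.
rc_presence:
  per_user:
    per_second: 0.1
    burst_count: 1
```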
([\#18107](https://github.com/element-hq/synapse/issues/18107), [\#18130](https://github.com/element-hq/synapse/issues/18130), [\#18131](https://github.com/element-hq/synapse/issues/18131)) +- Raise an error if someone is using an incorrect suffix in a config duration string. ([\#18112](https://github.com/element-hq/synapse/issues/18112)) +- Fix a bug where the [Delete Room Admin API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#version-2-new-version) would fail if the `block` parameter was set to `true` and a worker other than the main process was configured to handle background tasks. ([\#18119](https://github.com/element-hq/synapse/issues/18119)) ### Internal Changes -- Prepare unit tests for Python 3.12. ([\#16099](https://github.com/matrix-org/synapse/issues/16099)) -- Fix nightly CI jobs. ([\#16121](https://github.com/matrix-org/synapse/issues/16121), [\#16213](https://github.com/matrix-org/synapse/issues/16213)) -- Describe which rate limiter was hit in logs. ([\#16135](https://github.com/matrix-org/synapse/issues/16135)) -- Simplify presence code when using workers. ([\#16170](https://github.com/matrix-org/synapse/issues/16170)) -- Track per-device information in the presence code. ([\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172)) -- Stop using the `event_txn_id` table. ([\#16175](https://github.com/matrix-org/synapse/issues/16175)) -- Use `AsyncMock` instead of custom code. ([\#16179](https://github.com/matrix-org/synapse/issues/16179), [\#16180](https://github.com/matrix-org/synapse/issues/16180)) -- Improve error reporting of invalid data passed to `/_matrix/key/v2/query`. ([\#16183](https://github.com/matrix-org/synapse/issues/16183)) -- Task scheduler: add replication notify for new task to launch ASAP. ([\#16184](https://github.com/matrix-org/synapse/issues/16184)) -- Improve type hints. ([\#16186](https://github.com/matrix-org/synapse/issues/16186), [\#16188](https://github.com/matrix-org/synapse/issues/16188), [\#16201](https://github.com/matrix-org/synapse/issues/16201)) -- Bump black version to 23.7.0. ([\#16187](https://github.com/matrix-org/synapse/issues/16187)) -- Log the details of background update failures. ([\#16212](https://github.com/matrix-org/synapse/issues/16212)) -- Cache device resync requests over replication. ([\#16241](https://github.com/matrix-org/synapse/issues/16241)) - -### Updates to locked dependencies - -* Bump anyhow from 1.0.72 to 1.0.75. ([\#16141](https://github.com/matrix-org/synapse/issues/16141)) -* Bump furo from 2023.7.26 to 2023.8.19. ([\#16238](https://github.com/matrix-org/synapse/issues/16238)) -* Bump phonenumbers from 8.13.18 to 8.13.19. ([\#16237](https://github.com/matrix-org/synapse/issues/16237)) -* Bump psycopg2 from 2.9.6 to 2.9.7. ([\#16196](https://github.com/matrix-org/synapse/issues/16196)) -* Bump regex from 1.9.3 to 1.9.4. ([\#16195](https://github.com/matrix-org/synapse/issues/16195)) -* Bump ruff from 0.0.277 to 0.0.286. ([\#16198](https://github.com/matrix-org/synapse/issues/16198)) -* Bump sentry-sdk from 1.29.2 to 1.30.0. ([\#16236](https://github.com/matrix-org/synapse/issues/16236)) -* Bump serde from 1.0.184 to 1.0.188. ([\#16194](https://github.com/matrix-org/synapse/issues/16194)) -* Bump serde_json from 1.0.104 to 1.0.105. ([\#16140](https://github.com/matrix-org/synapse/issues/16140)) -* Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. 
([\#16200](https://github.com/matrix-org/synapse/issues/16200))
-* Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199))
-
-
-# Synapse 1.91.1 (2023-09-04)
-
-### Bugfixes
-
-- Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause an excessive linear growth in CPU usage. ([\#16220](https://github.com/matrix-org/synapse/issues/16220))
-
+- Increase the length of the generated `nonce` parameter when performing OIDC logins to comply with the TI-Messenger spec. ([\#18109](https://github.com/element-hq/synapse/issues/18109))
-# Synapse 1.91.0 (2023-08-30)
-No significant changes since 1.91.0rc1.
-
-
-# Synapse 1.91.0rc1 (2023-08-23)
-
-### Features
-
-- Implements an admin API to lock a user without deactivating them. Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870))
-- Implements a task scheduler for resumable, potentially long-running tasks. ([\#15891](https://github.com/matrix-org/synapse/issues/15891))
-- Allow specifying `client_secret_path` as an alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030))
-- Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC providers). ([\#16094](https://github.com/matrix-org/synapse/issues/16094))
-- Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. ([\#16114](https://github.com/matrix-org/synapse/issues/16114))
-
-### Bugfixes
-
-- Fix a long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). ([\#16052](https://github.com/matrix-org/synapse/issues/16052))
-- Fix a long-standing bug in `/sync` where `timeout=0` does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. ([\#16080](https://github.com/matrix-org/synapse/issues/16080))
-- Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. ([\#16116](https://github.com/matrix-org/synapse/issues/16116))
-- Filter out user agent references to the sliding sync proxy and rust-sdk from the `user_daily_visits` table to ensure that Element X can be represented fully. ([\#16124](https://github.com/matrix-org/synapse/issues/16124))
-- User consent and third-party ID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. ([\#16127](https://github.com/matrix-org/synapse/issues/16127), [\#16134](https://github.com/matrix-org/synapse/issues/16134))
-- Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. ([\#16133](https://github.com/matrix-org/synapse/issues/16133), [\#16169](https://github.com/matrix-org/synapse/issues/16169))
-- Fix performance degradation when there are a lot of in-flight replication requests.
([\#16148](https://github.com/matrix-org/synapse/issues/16148))
-- Fix a bug introduced in 1.87 where Synapse would send an excessive number of federation requests to servers which have been offline for a long time. Contributed by Nico. ([\#16156](https://github.com/matrix-org/synapse/issues/16156), [\#16164](https://github.com/matrix-org/synapse/issues/16164))
-
-### Improved Documentation
-
-- Structured logging docs: add a link explaining the ELK stack. ([\#16091](https://github.com/matrix-org/synapse/issues/16091))
-
-### Internal Changes
-
-- Update dehydrated devices implementation. ([\#16010](https://github.com/matrix-org/synapse/issues/16010))
-- Fix database performance of read/write worker locks. ([\#16061](https://github.com/matrix-org/synapse/issues/16061))
-- Fix building the nix development environment on macOS systems. ([\#16063](https://github.com/matrix-org/synapse/issues/16063))
-- Override global statement timeout when creating indexes in Postgres. ([\#16085](https://github.com/matrix-org/synapse/issues/16085))
-- Fix the type annotation on `run_db_interaction` in the Module API. ([\#16089](https://github.com/matrix-org/synapse/issues/16089))
-- Clean up the presence code. ([\#16092](https://github.com/matrix-org/synapse/issues/16092))
-- Run `pyupgrade` for Python 3.8+. ([\#16110](https://github.com/matrix-org/synapse/issues/16110))
-- Rename pagination and purge locks and add comments to explain why they exist and how they work. ([\#16112](https://github.com/matrix-org/synapse/issues/16112))
-- Attempt to fix the twisted trunk job. ([\#16115](https://github.com/matrix-org/synapse/issues/16115))
-- Cache token introspection response from OIDC provider. ([\#16117](https://github.com/matrix-org/synapse/issues/16117))
-- Add cache to `get_server_keys_json_for_remote`. ([\#16123](https://github.com/matrix-org/synapse/issues/16123))
-- Add an admin endpoint to allow the authorizing server to signal token revocations. ([\#16125](https://github.com/matrix-org/synapse/issues/16125))
-- Add response time metrics for introspection requests for delegated auth. ([\#16131](https://github.com/matrix-org/synapse/issues/16131))
-- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow impersonation by an admin user using the `_oidc_admin_impersonate_user_id` query parameter. ([\#16132](https://github.com/matrix-org/synapse/issues/16132))
-- Increase performance of read/write locks. ([\#16149](https://github.com/matrix-org/synapse/issues/16149))
-- Improve presence tests. ([\#16150](https://github.com/matrix-org/synapse/issues/16150), [\#16151](https://github.com/matrix-org/synapse/issues/16151), [\#16158](https://github.com/matrix-org/synapse/issues/16158))
-- Raised the poetry-core version cap to 1.7.0. ([\#16152](https://github.com/matrix-org/synapse/issues/16152))
-- Fix assertion in user directory unit tests. ([\#16157](https://github.com/matrix-org/synapse/issues/16157))
-- Reduce scope of locks when paginating to alleviate DB contention. ([\#16159](https://github.com/matrix-org/synapse/issues/16159))
-- Reduce DB contention on worker locks. ([\#16160](https://github.com/matrix-org/synapse/issues/16160))
-- Task scheduler: mark a task as active if we are scheduling it as soon as possible. ([\#16165](https://github.com/matrix-org/synapse/issues/16165))

### Updates to locked dependencies

-* Bump click from 8.1.6 to 8.1.7. ([\#16145](https://github.com/matrix-org/synapse/issues/16145))
-* Bump gitpython from 3.1.31 to 3.1.32. ([\#16103](https://github.com/matrix-org/synapse/issues/16103))
-* Bump ijson from 3.2.1 to 3.2.3.
([\#16143](https://github.com/matrix-org/synapse/issues/16143))
-* Bump isort from 5.11.5 to 5.12.0. ([\#16108](https://github.com/matrix-org/synapse/issues/16108))
-* Bump log from 0.4.19 to 0.4.20. ([\#16109](https://github.com/matrix-org/synapse/issues/16109))
-* Bump pygithub from 1.59.0 to 1.59.1. ([\#16144](https://github.com/matrix-org/synapse/issues/16144))
-* Bump sentry-sdk from 1.28.1 to 1.29.2. ([\#16142](https://github.com/matrix-org/synapse/issues/16142))
-* Bump serde from 1.0.183 to 1.0.184. ([\#16139](https://github.com/matrix-org/synapse/issues/16139))
-* Bump txredisapi from 1.4.9 to 1.4.10. ([\#16107](https://github.com/matrix-org/synapse/issues/16107))
-* Bump types-bleach from 6.0.0.3 to 6.0.0.4. ([\#16106](https://github.com/matrix-org/synapse/issues/16106))
-* Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105))
-* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146))
+* Bump dawidd6/action-download-artifact from 7 to 8. ([\#18108](https://github.com/element-hq/synapse/issues/18108))
+* Bump log from 0.4.22 to 0.4.25. ([\#18098](https://github.com/element-hq/synapse/issues/18098))
+* Bump python-multipart from 0.0.18 to 0.0.20. ([\#18096](https://github.com/element-hq/synapse/issues/18096))
+* Bump serde_json from 1.0.135 to 1.0.137. ([\#18099](https://github.com/element-hq/synapse/issues/18099))
+* Bump types-bleach from 6.1.0.20240331 to 6.2.0.20241123. ([\#18082](https://github.com/element-hq/synapse/issues/18082))
+
+
+# Synapse 1.123.0 (2025-01-28)
+
+No significant changes since 1.123.0rc1.
-# Synapse 1.90.0 (2023-08-15)
-
-No significant changes since 1.90.0rc1.
-
-
-# Synapse 1.90.0rc1 (2023-08-08)
+# Synapse 1.123.0rc1 (2025-01-21)

### Features

-- Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). ([\#15629](https://github.com/matrix-org/synapse/issues/15629))
-- Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). ([\#15868](https://github.com/matrix-org/synapse/issues/15868))
+- Implement [MSC4133](https://github.com/matrix-org/matrix-spec-proposals/pull/4133) for custom profile fields. Contributed by @clokep. ([\#17488](https://github.com/element-hq/synapse/issues/17488))
+- Add a query parameter `type` to the [Room State Admin API](https://element-hq.github.io/synapse/develop/admin_api/rooms.html#room-state-api) that filters the returned state events by event type. ([\#18035](https://github.com/element-hq/synapse/issues/18035))
+- Support the new `/auth_metadata` endpoint defined in [MSC2965](https://github.com/matrix-org/matrix-spec-proposals/pull/2965). ([\#18093](https://github.com/element-hq/synapse/issues/18093))

### Bugfixes

-- Fix a long-standing bug where purging history and paginating simultaneously could lead to database corruption when using workers.
([\#15791](https://github.com/matrix-org/synapse/issues/15791))
-- Fix a long-standing bug where the profile endpoint returned a 404 when the user's display name was empty. ([\#16012](https://github.com/matrix-org/synapse/issues/16012))
-- Fix a long-standing bug where `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043))
-- Fix a long-standing bug with deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046))
-
-### Updates to the Docker image
-
-- Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. ([\#15972](https://github.com/matrix-org/synapse/issues/15972), [\#16009](https://github.com/matrix-org/synapse/issues/16009))
+- Fix membership caches not updating in state reset scenarios. ([\#17732](https://github.com/element-hq/synapse/issues/17732))
+- Fix a rare race where, on upgrade to v1.122.0, a long-running database upgrade could lock out new events from being received or sent. ([\#18091](https://github.com/element-hq/synapse/issues/18091))

### Improved Documentation

-- Add an internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). ([\#16015](https://github.com/matrix-org/synapse/issues/16015))
-- Clarify the comment on the keys/upload over replication endpoint. ([\#16016](https://github.com/matrix-org/synapse/issues/16016))
-- Do not expose the Admin API in the caddy reverse proxy example. Contributed by @NilsIrl. ([\#16027](https://github.com/matrix-org/synapse/issues/16027))
+- Document the `tls` option for a worker instance in `instance_map`. ([\#18064](https://github.com/element-hq/synapse/issues/18064))

### Deprecations and Removals

-- Remove support for legacy application service paths. ([\#15964](https://github.com/matrix-org/synapse/issues/15964))
-- Move support for application service query parameter authorization behind a configuration option. ([\#16017](https://github.com/matrix-org/synapse/issues/16017))
+- Remove the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) implementation. The stable support remains, per [Matrix 1.13](https://spec.matrix.org/v1.13/client-server-api/#post_matrixclientv3roomsroomidreport). ([\#18052](https://github.com/element-hq/synapse/issues/18052))

### Internal Changes

-- Update SQL queries to inline boolean parameters as supported in SQLite 3.27. ([\#15525](https://github.com/matrix-org/synapse/issues/15525))
-- Allow for the configuration of the backoff algorithm for federation destinations. ([\#15754](https://github.com/matrix-org/synapse/issues/15754))
-- Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991))
-- Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992))
-- Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993))
-- Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013))
-- Fix building the nix development environment on macOS systems.
([\#16019](https://github.com/matrix-org/synapse/issues/16019)) -- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031)) -- Combine duplicated code. ([\#16023](https://github.com/matrix-org/synapse/issues/16023)) -- Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028)) -- Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068)) -- Drop backwards compat hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069)) - -### Updates to locked dependencies - -* Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011)) -* Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048)) -* Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077)) -* Bump immutabledict from 2.2.4 to 3.0.0. ([\#16034](https://github.com/matrix-org/synapse/issues/16034)) -* Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044)) -* Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081)) -* Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076)) -* Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073)) -* Bump serde from 1.0.171 to 1.0.175. ([\#15982](https://github.com/matrix-org/synapse/issues/15982)) -* Bump serde from 1.0.175 to 1.0.179. ([\#16033](https://github.com/matrix-org/synapse/issues/16033)) -* Bump serde from 1.0.179 to 1.0.183. ([\#16074](https://github.com/matrix-org/synapse/issues/16074)) -* Bump serde_json from 1.0.103 to 1.0.104. ([\#16032](https://github.com/matrix-org/synapse/issues/16032)) -* Bump service-identity from 21.1.0 to 23.1.0. ([\#16038](https://github.com/matrix-org/synapse/issues/16038)) -* Bump types-commonmark from 0.9.2.3 to 0.9.2.4. ([\#16037](https://github.com/matrix-org/synapse/issues/16037)) -* Bump types-jsonschema from 4.17.0.8 to 4.17.0.10. ([\#16036](https://github.com/matrix-org/synapse/issues/16036)) -* Bump types-netaddr from 0.8.0.8 to 0.8.0.9. ([\#16035](https://github.com/matrix-org/synapse/issues/16035)) -* Bump types-opentracing from 2.4.10.5 to 2.4.10.6. ([\#16078](https://github.com/matrix-org/synapse/issues/16078)) -* Bump types-setuptools from 68.0.0.0 to 68.0.0.3. ([\#16079](https://github.com/matrix-org/synapse/issues/16079)) - -# Synapse 1.89.0 (2023-08-01) - -No significant changes since 1.89.0rc1. - +- Increase invite rate limits (`rc_invites.per_issuer`) for Complement. ([\#18072](https://github.com/element-hq/synapse/issues/18072)) -# Synapse 1.89.0rc1 (2023-07-25) -### Features - -- Add Unix Socket support for HTTP Replication Listeners. [Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924)) -- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009). 
([\#15911](https://github.com/matrix-org/synapse/issues/15911))
-- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912))
-- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969))
-- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions, and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))
-
-### Bugfixes
-
-- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820))
-- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887))
-- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925))
-- Fixed deploy annotations in the provided Grafana dashboard config, so that it shows for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957))
-- Ensure a long state resolution does not starve the CPU, by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960))
-- Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973))
-- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975))
-
-### Improved Documentation
-
-- Better clarify how to run a worker instance (pass both configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921))
-- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938))
-- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981))
-
-### Deprecations and Removals
-
-- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928))
-
-### Internal Changes
-
-- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884))
-- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909))
-- Add details to the warning logged when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922))
-- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926))
-- Fix a bug in the read/write lock implementation. This is currently unused, so it has no observable effects.
([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958)) -- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940)) -- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952)) -- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955)) -- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961)) -- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968)) -- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971)) ### Updates to locked dependencies -* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949)) -* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984)) -* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943)) -* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948)) -* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986)) -* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945)) -* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946)) -* Bump pygithub from 1.58.2 to 1.59.0. ([\#15834](https://github.com/matrix-org/synapse/issues/15834)) -* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951)) -* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985)) -* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950)) -* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932)) -* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983)) -* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947)) - -# Synapse 1.88.0 (2023-07-18) - -This release - - raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and - - removes deprecated config options related to worker deployment. - -See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information. - - -### Bugfixes - -- Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters`", which was introduced in Synapse 1.88.0rc1. ([\#15953](https://github.com/matrix-org/synapse/issues/15953)) +* Bump jinja2 from 3.1.4 to 3.1.5. ([\#18067](https://github.com/element-hq/synapse/issues/18067)) +* Bump mypy from 1.12.1 to 1.13.0. ([\#18083](https://github.com/element-hq/synapse/issues/18083)) +* Bump pillow from 11.0.0 to 11.1.0. ([\#18084](https://github.com/element-hq/synapse/issues/18084)) +* Bump pyo3 from 0.23.3 to 0.23.4. ([\#18079](https://github.com/element-hq/synapse/issues/18079)) +* Bump pyopenssl from 24.2.1 to 24.3.0. ([\#18062](https://github.com/element-hq/synapse/issues/18062)) +* Bump serde_json from 1.0.134 to 1.0.135. 
([\#18081](https://github.com/element-hq/synapse/issues/18081))
+* Bump ulid from 1.1.3 to 1.1.4. ([\#18080](https://github.com/element-hq/synapse/issues/18080))
+
+
+# Synapse 1.122.0 (2025-01-14)
-# Synapse 1.88.0rc1 (2023-07-11)
+Please note that this version of Synapse drops support for PostgreSQL 11 and 12. The minimum version of PostgreSQL supported is now version 13.
-### Features
-
-- Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844))
+No significant changes since 1.122.0rc1.
-### Bugfixes
-- Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release.
-  Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862))
-- Correctly resize thumbnails with pillow version >=10. ([\#15876](https://github.com/matrix-org/synapse/issues/15876))
-
-### Improved Documentation
-
-- Fixed header levels on the [Admin API "Users"](https://matrix-org.github.io/synapse/v1.87/admin_api/user_admin_api.html) documentation page. Contributed by @sumnerevans at @beeper. ([\#15852](https://github.com/matrix-org/synapse/issues/15852))
-- Remove documentation for the deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15872](https://github.com/matrix-org/synapse/issues/15872))
+
+
+# Synapse 1.122.0rc1 (2025-01-07)

### Deprecations and Removals

-- **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860))
-- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893), [\#15917](https://github.com/matrix-org/synapse/pull/15917))
-
-### Internal Changes
-
-- Add foreign key constraint to `event_forward_extremities`. ([\#15751](https://github.com/matrix-org/synapse/issues/15751), [\#15907](https://github.com/matrix-org/synapse/issues/15907))
-- Add read/write style cross-worker locks. ([\#15782](https://github.com/matrix-org/synapse/issues/15782))
-- Stop writing to column `user_id` of tables `profiles` and `user_filters`. ([\#15787](https://github.com/matrix-org/synapse/issues/15787))
-- Use a lower isolation level when cleaning old presence stream data to avoid serialization errors. ([\#15826](https://github.com/matrix-org/synapse/issues/15826))
-- Add tracing to media `/upload` code paths. ([\#15850](https://github.com/matrix-org/synapse/issues/15850), [\#15888](https://github.com/matrix-org/synapse/issues/15888))
-- Add a timeout that aborts any Postgres statement taking more than 1 hour. ([\#15853](https://github.com/matrix-org/synapse/issues/15853))
-- Fix the `devenv up` configuration, which was ignoring the config overrides. ([\#15854](https://github.com/matrix-org/synapse/issues/15854))
-- Optimised cleanup of old entries in `device_lists_stream`. ([\#15861](https://github.com/matrix-org/synapse/issues/15861))
-- Update the Matrix clients link in the _It works! Synapse is running_ landing page.
([\#15874](https://github.com/matrix-org/synapse/issues/15874))
-- Fix building Synapse with the nightly Rust compiler. ([\#15906](https://github.com/matrix-org/synapse/issues/15906))
-- Add `Server` to the Access-Control-Expose-Headers header. ([\#15908](https://github.com/matrix-org/synapse/issues/15908))

### Updates to locked dependencies

-* Bump authlib from 1.2.0 to 1.2.1. ([\#15864](https://github.com/matrix-org/synapse/issues/15864))
-* Bump importlib-metadata from 6.6.0 to 6.7.0. ([\#15865](https://github.com/matrix-org/synapse/issues/15865))
-* Bump lxml from 4.9.2 to 4.9.3. ([\#15897](https://github.com/matrix-org/synapse/issues/15897))
-* Bump regex from 1.8.4 to 1.9.1. ([\#15902](https://github.com/matrix-org/synapse/issues/15902))
-* Bump ruff from 0.0.275 to 0.0.277. ([\#15900](https://github.com/matrix-org/synapse/issues/15900))
-* Bump sentry-sdk from 1.25.1 to 1.26.0. ([\#15867](https://github.com/matrix-org/synapse/issues/15867))
-* Bump serde_json from 1.0.99 to 1.0.100. ([\#15901](https://github.com/matrix-org/synapse/issues/15901))
-* Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1. ([\#15866](https://github.com/matrix-org/synapse/issues/15866))
-
-# Synapse 1.87.0 (2023-07-04)
-
-Please note that this will be the last release of Synapse that is compatible with
-Python 3.7 and earlier.
-This is due to Python 3.7 now having reached End of Life; see our [deprecation policy](https://matrix-org.github.io/synapse/v1.87/deprecation_policy.html)
-for more details.
-
-### Bugfixes
-
-- Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release.
-  Resolves https://github.com/matrix-org/synapse/issues/15858.
-  Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862))
-
-### Internal Changes
-
-- Split out 2022 changes from the changelog so the rendered version in GitHub doesn't time out as much. ([\#15846](https://github.com/matrix-org/synapse/issues/15846))
-
-
-# Synapse 1.87.0rc1 (2023-06-27)
+- Remove support for PostgreSQL 11 and 12. Contributed by @clokep. ([\#18034](https://github.com/element-hq/synapse/issues/18034))

### Features

-- Improve `/messages` response time by avoiding backfill when we already have messages to return. ([\#15737](https://github.com/matrix-org/synapse/issues/15737))
-- Add spam checker module API for logins. ([\#15838](https://github.com/matrix-org/synapse/issues/15838))
+- Added the `email.tlsname` config option. This allows specifying the domain name used to validate the SMTP server's TLS certificate separately from the `email.smtp_host` used to connect. ([\#17849](https://github.com/element-hq/synapse/issues/17849))
+- Module developers will have access to the user ID of the requester when adding `check_username_for_spam` callbacks to `spam_checker_module_callbacks`. Contributed by Wilson@Pangea.chat. ([\#17916](https://github.com/element-hq/synapse/issues/17916))
+- Add endpoints to the Admin API to fetch the number of invites the provided user has sent after a given timestamp,
+  fetch the number of rooms the provided user has joined after a given timestamp, and get report IDs of event
+  reports against a provided user (i.e. where the user was the sender of the reported event). ([\#17948](https://github.com/element-hq/synapse/issues/17948))
+- Support stable account suspension from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17964](https://github.com/element-hq/synapse/issues/17964))
+- Add `macaroon_secret_key_path` config option. ([\#17983](https://github.com/element-hq/synapse/issues/17983))
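Both of the new config options in the feature list above, `email.tlsname` and `macaroon_secret_key_path`, are ordinary `homeserver.yaml` settings. A minimal sketch of how they might be combined — the hostnames, port, and secret path here are illustrative assumptions, not defaults:

```yaml
email:
  # Connect to an internal relay...
  smtp_host: smtp.internal.example.com
  smtp_port: 465
  force_tls: true
  # ...but validate its TLS certificate against the public name it presents.
  tlsname: mail.example.com

# Read the macaroon secret from a file (e.g. one provisioned by secret
# management tooling) instead of inlining it in the config.
macaroon_secret_key_path: /run/secrets/synapse_macaroon_secret_key
```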
### Bugfixes

-- Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou. ([\#15680](https://github.com/matrix-org/synapse/issues/15680))
-- Avoid invalidating a cache that was just prefilled. ([\#15758](https://github.com/matrix-org/synapse/issues/15758))
-- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15770](https://github.com/matrix-org/synapse/issues/15770))
-- Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper. ([\#15776](https://github.com/matrix-org/synapse/issues/15776))
-- Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`. ([\#15781](https://github.com/matrix-org/synapse/issues/15781))
-- Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend. ([\#15788](https://github.com/matrix-org/synapse/issues/15788))
-- Fix Sytest environment variable evaluation in CI. ([\#15804](https://github.com/matrix-org/synapse/issues/15804))
-- Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly. ([\#15815](https://github.com/matrix-org/synapse/issues/15815))
-- Fix sqlite `user_filters` upgrade introduced in v1.86.0. ([\#15817](https://github.com/matrix-org/synapse/issues/15817))
+- Fix a bug when rejecting a withdrawn invite with a `third_party_rules` module, where the invite would be stuck for the client. ([\#17930](https://github.com/element-hq/synapse/issues/17930))
+- Properly purge state groups tables when purging a room with the Admin API. ([\#18024](https://github.com/element-hq/synapse/issues/18024))
+- Fix a bug preventing the admin redaction endpoint from working on messages from remote users. ([\#18029](https://github.com/element-hq/synapse/issues/18029), [\#18043](https://github.com/element-hq/synapse/issues/18043))

### Improved Documentation

-- Document `looping_call()` functionality that will wait for the given function to finish before scheduling another. ([\#15772](https://github.com/matrix-org/synapse/issues/15772))
-- Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html). ([\#15805](https://github.com/matrix-org/synapse/issues/15805))
-- Fix a typo in the MSC number in the faster remote room join architecture doc. ([\#15812](https://github.com/matrix-org/synapse/issues/15812))

### Deprecations and Removals

-- Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms. ([\#15748](https://github.com/matrix-org/synapse/issues/15748))
+- Update `synapse.app.generic_worker` documentation to only recommend `GET` requests for stream writer routes by default, unless the worker is also configured as a stream writer. Contributed by @evoL. ([\#17954](https://github.com/element-hq/synapse/issues/17954))
+- Add documentation for the previously-undocumented `last_seen_ts` query parameter to the query user Admin API. ([\#17976](https://github.com/element-hq/synapse/issues/17976))
+- Improve documentation for the `TaskScheduler` class.
([\#17992](https://github.com/element-hq/synapse/issues/17992)) +- Fix example in reverse proxy docs to include server port. ([\#17994](https://github.com/element-hq/synapse/issues/17994)) +- Update Alpine Linux Synapse Package Maintainer within the installation instructions. ([\#17846](https://github.com/element-hq/synapse/issues/17846)) ### Internal Changes -- Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`. ([\#15233](https://github.com/matrix-org/synapse/issues/15233)) -- Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying. ([\#15743](https://github.com/matrix-org/synapse/issues/15743)) -- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15755](https://github.com/matrix-org/synapse/issues/15755)) -- Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. ([\#15783](https://github.com/matrix-org/synapse/issues/15783)) -- Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic. ([\#15806](https://github.com/matrix-org/synapse/issues/15806)) -- Fix harmless exceptions being printed when running the port DB script. ([\#15814](https://github.com/matrix-org/synapse/issues/15814)) - -### Updates to locked dependencies - -* Bump attrs from 22.2.0 to 23.1.0. ([\#15801](https://github.com/matrix-org/synapse/issues/15801)) -* Bump cryptography from 40.0.2 to 41.0.1. ([\#15800](https://github.com/matrix-org/synapse/issues/15800)) -* Bump ijson from 3.2.0.post0 to 3.2.1. ([\#15802](https://github.com/matrix-org/synapse/issues/15802)) -* Bump phonenumbers from 8.13.13 to 8.13.14. ([\#15798](https://github.com/matrix-org/synapse/issues/15798)) -* Bump ruff from 0.0.265 to 0.0.272. ([\#15799](https://github.com/matrix-org/synapse/issues/15799)) -* Bump ruff from 0.0.272 to 0.0.275. ([\#15833](https://github.com/matrix-org/synapse/issues/15833)) -* Bump serde_json from 1.0.96 to 1.0.97. ([\#15797](https://github.com/matrix-org/synapse/issues/15797)) -* Bump serde_json from 1.0.97 to 1.0.99. ([\#15832](https://github.com/matrix-org/synapse/issues/15832)) -* Bump towncrier from 22.12.0 to 23.6.0. ([\#15831](https://github.com/matrix-org/synapse/issues/15831)) -* Bump types-opentracing from 2.4.10.4 to 2.4.10.5. ([\#15830](https://github.com/matrix-org/synapse/issues/15830)) -* Bump types-setuptools from 67.8.0.0 to 68.0.0.0. ([\#15835](https://github.com/matrix-org/synapse/issues/15835)) - -Synapse 1.86.0 (2023-06-20) -=========================== - -No significant changes since 1.86.0rc2. - - -Synapse 1.86.0rc2 (2023-06-14) -============================== - -Bugfixes --------- - -- Fix an error when having workers of different versions running. ([\#15774](https://github.com/matrix-org/synapse/issues/15774)) - - -Synapse 1.86.0rc1 (2023-06-13) -============================== - -This version was tagged but never released. - -Features --------- - -- Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#15388](https://github.com/matrix-org/synapse/issues/15388)) -- Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. 
([\#15450](https://github.com/matrix-org/synapse/issues/15450))
-- Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#15520](https://github.com/matrix-org/synapse/issues/15520))
-- Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. ([\#15582](https://github.com/matrix-org/synapse/issues/15582))
-- Add Synapse version deploy annotations to the Grafana dashboard, enabling easy correlation between behaviour changes seen in a graph and a particular Synapse version, to help nail down regressions. ([\#15674](https://github.com/matrix-org/synapse/issues/15674))
-- Add a catch-all `*` to the supported relation types when redacting an event and its related events. This is an update to the [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912) implementation. ([\#15705](https://github.com/matrix-org/synapse/issues/15705))
-- Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. ([\#15710](https://github.com/matrix-org/synapse/issues/15710))
-- Expose a metric reporting the database background update status. ([\#15740](https://github.com/matrix-org/synapse/issues/15740))
-
-
-Bugfixes
---------
-
-- Correctly clear caches when we delete a room. ([\#15609](https://github.com/matrix-org/synapse/issues/15609))
-- Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms. ([\#15695](https://github.com/matrix-org/synapse/issues/15695))
-
-
-Improved Documentation
-----------------------
-
-- Simplify the query to find participating servers in a room. ([\#15732](https://github.com/matrix-org/synapse/issues/15732))
-
-
-Internal Changes
-----------------
-
-- Log when events are (maybe unexpectedly) filtered out of responses in tests. ([\#14213](https://github.com/matrix-org/synapse/issues/14213))
-- Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters`. ([\#15649](https://github.com/matrix-org/synapse/issues/15649))
-- Add support for tracing functions which return `Awaitable`s. ([\#15650](https://github.com/matrix-org/synapse/issues/15650))
-- Cache requests for a user's devices over federation. ([\#15675](https://github.com/matrix-org/synapse/issues/15675))
-- Add fully qualified docker image names to Dockerfiles. ([\#15689](https://github.com/matrix-org/synapse/issues/15689))
-- Remove some unused code. ([\#15690](https://github.com/matrix-org/synapse/issues/15690))
-- Improve type hints. ([\#15694](https://github.com/matrix-org/synapse/issues/15694), [\#15697](https://github.com/matrix-org/synapse/issues/15697))
-- Update docstring and traces on `maybe_backfill()` functions. ([\#15709](https://github.com/matrix-org/synapse/issues/15709))
-- Add context for when/why to use the `long_retries` option when sending Federation requests. ([\#15721](https://github.com/matrix-org/synapse/issues/15721))
-- Removed some unused fields. ([\#15723](https://github.com/matrix-org/synapse/issues/15723))
-- Update federation error to more plainly explain we can only authorize our own membership events. ([\#15725](https://github.com/matrix-org/synapse/issues/15725))
-- Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. ([\#15726](https://github.com/matrix-org/synapse/issues/15726))
-- Improve performance of user directory search.
([\#15729](https://github.com/matrix-org/synapse/issues/15729)) -- Remove redundant table join with `room_memberships` when doing a `is_host_joined()`/`is_host_invited()` call (`membership` is already part of the `current_state_events`). ([\#15731](https://github.com/matrix-org/synapse/issues/15731)) -- Remove superfluous `room_memberships` join from background update. ([\#15733](https://github.com/matrix-org/synapse/issues/15733)) -- Speed up typechecking CI. ([\#15752](https://github.com/matrix-org/synapse/issues/15752)) -- Bump minimum supported Rust version to 1.60.0. ([\#15768](https://github.com/matrix-org/synapse/issues/15768)) +- Add `RoomID` & `EventID` rust types. ([\#17996](https://github.com/element-hq/synapse/issues/17996)) +- Fix various type errors across the codebase. ([\#17998](https://github.com/element-hq/synapse/issues/17998)) +- Disable DB statement timeout when doing a room purge since it can be quite long. ([\#18017](https://github.com/element-hq/synapse/issues/18017)) +- Remove some remaining uses of `twisted.internet.defer.returnValue`. Contributed by Colin Watson. ([\#18020](https://github.com/element-hq/synapse/issues/18020)) +- Refactor `get_profile` to no longer include fields with a value of `None`. ([\#18063](https://github.com/element-hq/synapse/issues/18063)) ### Updates to locked dependencies -* Bump importlib-metadata from 6.1.0 to 6.6.0. ([\#15711](https://github.com/matrix-org/synapse/issues/15711)) -* Bump library/redis from 6-bullseye to 7-bullseye in /docker. ([\#15712](https://github.com/matrix-org/synapse/issues/15712)) -* Bump log from 0.4.18 to 0.4.19. ([\#15761](https://github.com/matrix-org/synapse/issues/15761)) -* Bump phonenumbers from 8.13.11 to 8.13.13. ([\#15763](https://github.com/matrix-org/synapse/issues/15763)) -* Bump pyasn1 from 0.4.8 to 0.5.0. ([\#15713](https://github.com/matrix-org/synapse/issues/15713)) -* Bump pydantic from 1.10.8 to 1.10.9. ([\#15762](https://github.com/matrix-org/synapse/issues/15762)) -* Bump pyo3-log from 0.8.1 to 0.8.2. ([\#15759](https://github.com/matrix-org/synapse/issues/15759)) -* Bump pyopenssl from 23.1.1 to 23.2.0. ([\#15765](https://github.com/matrix-org/synapse/issues/15765)) -* Bump regex from 1.7.3 to 1.8.4. ([\#15769](https://github.com/matrix-org/synapse/issues/15769)) -* Bump sentry-sdk from 1.22.1 to 1.25.0. ([\#15714](https://github.com/matrix-org/synapse/issues/15714)) -* Bump sentry-sdk from 1.25.0 to 1.25.1. ([\#15764](https://github.com/matrix-org/synapse/issues/15764)) -* Bump serde from 1.0.163 to 1.0.164. ([\#15760](https://github.com/matrix-org/synapse/issues/15760)) -* Bump types-jsonschema from 4.17.0.7 to 4.17.0.8. ([\#15716](https://github.com/matrix-org/synapse/issues/15716)) -* Bump types-pyopenssl from 23.1.0.2 to 23.2.0.0. ([\#15766](https://github.com/matrix-org/synapse/issues/15766)) -* Bump types-requests from 2.31.0.0 to 2.31.0.1. ([\#15715](https://github.com/matrix-org/synapse/issues/15715)) - -Synapse 1.85.2 (2023-06-08) -=========================== - -Bugfixes --------- - -- Fix regression where using TLS for HTTP replication between workers did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746)) - - -Synapse 1.85.1 (2023-06-07) -=========================== - -Note: this release only fixes a bug that stopped some deployments from upgrading to v1.85.0. There is no need to upgrade to v1.85.1 if successfully running v1.85.0. - -Bugfixes --------- - -- Fix bug in schema delta that broke upgrades for some deployments. 
Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738), [\#15739](https://github.com/matrix-org/synapse/issues/15739)) - - -Synapse 1.85.0 (2023-06-06) -=========================== - -No significant changes since 1.85.0rc2. - - -## Security advisory - -The following issues are fixed in 1.85.0 (and RCs). - -- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32682) — Low Severity - - It may be possible for a deactivated user to login when using uncommon configurations. - -- [GHSA-98px-6486-j7qc](https://github.com/matrix-org/synapse/security/advisories/GHSA-98px-6486-j7qc) / [CVE-2023-32683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity - - A discovered oEmbed or image URL can bypass the `url_preview_url_blacklist` setting potentially allowing server side request forgery or bypassing network policies. Impact is limited to IP addresses allowed by the `url_preview_ip_range_blacklist` setting (by default this only allows public IPs). - -See the advisories for more details. If you have any questions, email security@matrix.org. - - -Synapse 1.85.0rc2 (2023-06-01) -============================== - -Bugfixes --------- - -- Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. ([\#15693](https://github.com/matrix-org/synapse/issues/15693)) - - -Deprecations and Removals -------------------------- - -- Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15703](https://github.com/matrix-org/synapse/issues/15703)) - - -Internal Changes ----------------- - -- Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. ([\#15700](https://github.com/matrix-org/synapse/issues/15700)) - - -Synapse 1.85.0rc1 (2023-05-30) -============================== - -Features --------- - -- Improve performance of backfill requests by performing backfill of previously failed requests in the background. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) -- Add a new [admin API](https://matrix-org.github.io/synapse/v1.85/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.85/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) -- Add Unix socket support for Redis connections. Contributed by Jason Little. ([\#15644](https://github.com/matrix-org/synapse/issues/15644)) - - -Bugfixes --------- - -- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464)) -- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601)) -- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607)) -- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. 
-
-
-Synapse 1.85.0rc2 (2023-06-01)
-==============================
-
-Bugfixes
---------
-
-- Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. ([\#15693](https://github.com/matrix-org/synapse/issues/15693))
-
-
-Deprecations and Removals
--------------------------
-
-- Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15703](https://github.com/matrix-org/synapse/issues/15703))
-
-
-Internal Changes
-----------------
-
-- Speed up the background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. ([\#15700](https://github.com/matrix-org/synapse/issues/15700))
-
-
-Synapse 1.85.0rc1 (2023-05-30)
-==============================
-
-Features
---------
-
-- Improve performance of backfill requests by performing backfill of previously failed requests in the background. ([\#15585](https://github.com/matrix-org/synapse/issues/15585))
-- Add a new [admin API](https://matrix-org.github.io/synapse/v1.85/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.85/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611))
-- Add Unix socket support for Redis connections. Contributed by Jason Little. ([\#15644](https://github.com/matrix-org/synapse/issues/15644))
-
-
-Bugfixes
---------
-
-- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464))
-- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601))
-- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607))
-- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. ([\#15614](https://github.com/matrix-org/synapse/issues/15614))
-- Fix a long-standing bug where deactivated users were still able to log in using the custom `org.matrix.login.jwt` login type (if enabled). ([\#15624](https://github.com/matrix-org/synapse/issues/15624))
-- Fix a long-standing bug where deactivated users were able to log in in uncommon situations. ([\#15634](https://github.com/matrix-org/synapse/issues/15634))
-
-
-Improved Documentation
-----------------------
-
-- Warn users that at least 3.75GB of space is needed for the nix Synapse development environment. ([\#15613](https://github.com/matrix-org/synapse/issues/15613))
-- Remove an outdated comment from the generated and sample homeserver log configs. ([\#15648](https://github.com/matrix-org/synapse/issues/15648))
-- Improve the contributor docs to make it clearer that Rust is a necessary prerequisite. Contributed by @grantm. ([\#15668](https://github.com/matrix-org/synapse/issues/15668))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the old version of the R30 (30-day retained users) phone-home metric. ([\#10428](https://github.com/matrix-org/synapse/issues/10428))
-
-
-Internal Changes
-----------------
-
-- Create dependabot changelogs at release time. ([\#15481](https://github.com/matrix-org/synapse/issues/15481))
-- Add a not-null constraint to the column `full_user_id` of the tables `profiles` and `user_filters`. ([\#15537](https://github.com/matrix-org/synapse/issues/15537))
-- Allow connecting to HTTP replication endpoints by using `worker_name` when constructing the request. ([\#15578](https://github.com/matrix-org/synapse/issues/15578))
-- Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. ([\#15597](https://github.com/matrix-org/synapse/issues/15597))
-- Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. ([\#15602](https://github.com/matrix-org/synapse/issues/15602))
-- Fix subscriptable type usage on Python <3.9. ([\#15604](https://github.com/matrix-org/synapse/issues/15604))
-- Update internal terminology. ([\#15606](https://github.com/matrix-org/synapse/issues/15606), [\#15620](https://github.com/matrix-org/synapse/issues/15620))
-- Instrument `state` and `state_group` storage-related operations to get a better picture of what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647))
-- Trace how many new events from a backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633))
-- Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615))
-- Update the Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) implementation to match the new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621))
-- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625))
-- Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626))
-- Remove a duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636))
-- Fix two memory leaks in `trial` test runs. ([\#15630](https://github.com/matrix-org/synapse/issues/15630))
-- Limit the size of the `HomeServerConfig` cache in trial test runs. ([\#15646](https://github.com/matrix-org/synapse/issues/15646))
-- Improve type hints. ([\#15658](https://github.com/matrix-org/synapse/issues/15658), [\#15659](https://github.com/matrix-org/synapse/issues/15659))
-- Add a requesting user id parameter to key claim methods in `TransportLayerClient`. ([\#15663](https://github.com/matrix-org/synapse/issues/15663))
-- Speed up rebuilding of the user directory for local users. ([\#15665](https://github.com/matrix-org/synapse/issues/15665))
-- Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11. ([\#15666](https://github.com/matrix-org/synapse/issues/15666), [\#15678](https://github.com/matrix-org/synapse/issues/15678))
-
 ### Updates to locked dependencies
-
-* Bump furo from 2023.3.27 to 2023.5.20. ([\#15642](https://github.com/matrix-org/synapse/issues/15642))
-* Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681))
-* Bump prometheus-client from 0.16.0 to 0.17.0. ([\#15682](https://github.com/matrix-org/synapse/issues/15682))
-* Bump pydantic from 1.10.7 to 1.10.8. ([\#15685](https://github.com/matrix-org/synapse/issues/15685))
-* Bump pygithub from 1.58.1 to 1.58.2. ([\#15643](https://github.com/matrix-org/synapse/issues/15643))
-* Bump requests from 2.28.2 to 2.31.0. ([\#15651](https://github.com/matrix-org/synapse/issues/15651))
-* Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641))
-* Bump types-bleach from 6.0.0.1 to 6.0.0.3. ([\#15686](https://github.com/matrix-org/synapse/issues/15686))
-* Bump types-pillow from 9.5.0.2 to 9.5.0.4. ([\#15640](https://github.com/matrix-org/synapse/issues/15640))
-* Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. ([\#15683](https://github.com/matrix-org/synapse/issues/15683))
-* Bump types-requests from 2.30.0.0 to 2.31.0.0. ([\#15684](https://github.com/matrix-org/synapse/issues/15684))
-* Bump types-setuptools from 67.7.0.2 to 67.8.0.0. ([\#15639](https://github.com/matrix-org/synapse/issues/15639))
-
-Synapse 1.84.1 (2023-05-26)
-===========================
-
-This patch release fixes a major issue with homeservers that do not have an `instance_map` defined but which do use workers.
-If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release.
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse v1.84.0 where workers would not start up when no `instance_map` was provided. ([\#15672](https://github.com/matrix-org/synapse/issues/15672))
-
-
-Internal Changes
-----------------
-
-- Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. ([\#15673](https://github.com/matrix-org/synapse/issues/15673))
-
-
-Synapse 1.84.0 (2023-05-23)
-===========================
-
-The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information.
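A minimal sketch of the new shape (hostnames and ports are illustrative): rather than repeating `worker_replication_host` and `worker_replication_http_port` in each worker's config, the main process is described once in the shared configuration's `instance_map`, alongside any other instances.

```yaml
# Illustrative shared-configuration excerpt.
instance_map:
  main:
    host: localhost
    port: 9093  # the main process's HTTP replication listener
  generic_worker1:
    host: localhost
    port: 9094
```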
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599))
-
-
-Synapse 1.84.0rc1 (2023-05-16)
-==============================
-
-Features
---------
-
-- Add an option to prevent media downloads from configured domains. ([\#15197](https://github.com/matrix-org/synapse/issues/15197))
-- Add a `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them (see the sketch after this list). ([\#15224](https://github.com/matrix-org/synapse/issues/15224))
-- Add Redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312))
-- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. ([\#15516](https://github.com/matrix-org/synapse/issues/15516))
-- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528))
-- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536))
-- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559))
-- Print the full error and stack trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569))
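For the `forget_rooms_on_leave` option above, a one-line sketch of enabling it (the behaviour restated here comes from the entry itself; that the option defaults to off is an assumption):

```yaml
# Illustrative homeserver.yaml excerpt: when enabled, rooms are forgotten
# automatically when a local user leaves them or is removed from them,
# assumed to behave as if the user had called /forget themselves.
forget_rooms_on_leave: true
```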
-
-
-Bugfixes
---------
-
-- Don't fail on federation over Tor where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523))
-- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010), which rejects setting `m.push_rules` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555))
-- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564))
-- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571))
-
-
-Deprecations and Removals
--------------------------
-
-- Remove the need for `worker_replication_*`-based settings in worker configuration YAML by placing this data directly in the `instance_map` instead. ([\#15491](https://github.com/matrix-org/synapse/issues/15491))
-
-
-Updates to the Docker image
----------------------------
-
-- Add the pkg-config package to Stage 0 so that the Dockerfile can be built on the ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567))
-
-
-Improved Documentation
----------------------
-
-- Clarify the documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544))
-- Fix the path to the `statistics/database/rooms` admin API in the documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560))
-- Update and improve the Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587))
-
-
-Internal Changes
----------------
-
-- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025))
-- Create a new `Client` for use with HTTP replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470))
-- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509))
-- Remove references to supporting a per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522))
-- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527))
-- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529))
-- Speed up deletion of old rows in `event_push_actions`. ([\#15531](https://github.com/matrix-org/synapse/issues/15531))
-- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the Nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545))
-- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534))
-- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535))
-- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539))
-- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542))
-- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543))
-- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548))
-- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549))
-- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550))
-- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551))
-- Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552))
-- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553))
-- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558))
-- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562))
-- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563))
-- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565))
-- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570))
-- Deal with upcoming GitHub Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576))
-- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577))
-- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588))
-- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589))
-- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590))
-- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10. ([\#15591](https://github.com/matrix-org/synapse/issues/15591))
-- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592))
-- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594))
-
-
-Synapse 1.83.0 (2023-05-09)
-===========================
-
-No significant changes since 1.83.0rc1.
-
-
-Synapse 1.83.0rc1 (2023-05-02)
-==============================
-
-Features
---------
-
-- Experimental support for recursively providing relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315))
-- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318))
-- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344))
-- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387))
-- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482))
-
-
-Bugfixes
---------
-
-- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361))
-- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. ([\#15417](https://github.com/matrix-org/synapse/issues/15417))
-- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494))
-
-
-Improved Documentation
----------------------
-
-- Add an Nginx load-balancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411))
-- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498))
-
-
-Internal Changes
----------------
-
-- Speed up tests by caching `HomeServerConfig` instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284))
-- Add a denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356))
-- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418))
-- Add the column `full_user_id` to the tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458))
-- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462))
-- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497))
-- Support claiming more than one OTK at a time. ([\#15468](https://github.com/matrix-org/synapse/issues/15468))
-- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471))
-- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473))
-- Bump cryptography from 40.0.1 to 40.0.2. ([\#15474](https://github.com/matrix-org/synapse/issues/15474))
-- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475))
-- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476))
-- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479))
-- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495))
-- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507))
-- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508))
-- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510))
-- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511))
-- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512))
-- Update the check_schema_delta script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466))
-
-
-Synapse 1.82.0 (2023-04-25)
-===========================
-
-No significant changes since 1.82.0rc1.
-
-
-Synapse 1.82.0rc1 (2023-04-18)
-==============================
-
-Features
---------
-
-- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. ([\#15333](https://github.com/matrix-org/synapse/issues/15333))
-- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431))
-- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436))
-
-
-Bugfixes
---------
-
-- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181))
-- Fix, and document, the previously unstated assumption that `on_logged_out` module hooks will be called before the deletion of pushers. ([\#15410](https://github.com/matrix-org/synapse/issues/15410))
-- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423))
-- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. ([\#15425](https://github.com/matrix-org/synapse/issues/15425))
-- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428))
-
-
-Improved Documentation
----------------------
-
-- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452))
-
-
-Deprecations and Removals
-------------------------
-
-- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405))
-
-
-Internal Changes
----------------
-
-- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372))
-- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373))
-- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374))
-- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375))
-- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376))
-- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404))
-- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412))
-- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413))
-- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414))
-- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415))
-- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441))
-- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442))
-- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443))
-- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444))
-- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445))
-- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446))
-- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447))
-- Bump psycopg2 from 2.9.5 to 2.9.6. ([\#15448](https://github.com/matrix-org/synapse/issues/15448))
-- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429))
-- Implement the [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393))
-- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. ([\#15394](https://github.com/matrix-org/synapse/issues/15394))
-- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property (see the sketch at the end of this list). ([\#15395](https://github.com/matrix-org/synapse/issues/15395))
-- Trust dtolnay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406))
-- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409))
-- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421))
-- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427))
-- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432))
-- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433))
-- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435))
-- Disable directory listing for static resources in `/_matrix/static/`. ([\#15438](https://github.com/matrix-org/synapse/issues/15438))
-- Move various module API callback registration methods to a dedicated class. ([\#15453](https://github.com/matrix-org/synapse/issues/15453))
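The MSC2174 change above is easiest to see on the wire. A hedged illustration (event IDs are made up and other event fields are omitted): in room version 11 the redaction target moves from a top-level `redacts` key into the event `content`.

```python
# Hedged illustration of MSC2174; fields trimmed to the relevant parts.
redaction_pre_v11 = {
    "type": "m.room.redaction",
    "redacts": "$abc123:example.com",  # top-level key in older room versions
    "content": {},
}

redaction_v11 = {
    "type": "m.room.redaction",
    "content": {"redacts": "$abc123:example.com"},  # moved into content
}
```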
-
-
-Synapse 1.81.0 (2023-04-11)
-===========================
-
-Synapse now attempts the versioned appservice paths before falling back to the
-[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
-Usage of the legacy routes should be considered deprecated.
-
-Additionally, Synapse has supported sending the application service access token
-via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
-since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
-query parameter. This is insecure and should be considered deprecated.
-
-A future version of Synapse (v1.88.0 or later) will remove support for legacy
-application service routes and query parameter authorization.
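A hedged sketch of the two styles (URL, transaction ID and token are illustrative; that the homeserver authenticates to an application service with the `hs_token` from its registration is taken from the Application Service API spec, not from this changelog):

```python
# Illustrative only: how a homeserver-to-appservice transaction can be
# authenticated in each style.
import requests

AS_URL = "https://appservice.example.com"
HS_TOKEN = "hs_token_from_the_registration_file"

# Preferred: versioned route, token in the Authorization header.
requests.put(
    f"{AS_URL}/_matrix/app/v1/transactions/1",
    headers={"Authorization": f"Bearer {HS_TOKEN}"},
    json={"events": []},
)

# Deprecated: legacy route, token as a query parameter (leaks into logs).
requests.put(
    f"{AS_URL}/transactions/1",
    params={"access_token": HS_TOKEN},
    json={"events": []},
)
```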
-
-
-No significant changes since 1.81.0rc2.
-
-
-Synapse 1.81.0rc2 (2023-04-06)
-==============================
-
-Bugfixes
---------
-
-- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391))
-
-
-Internal Changes
----------------
-
-- Update CI to run Complement under the latest stable Go version. ([\#15403](https://github.com/matrix-org/synapse/issues/15403))
-
-
-Synapse 1.81.0rc1 (2023-04-04)
-==============================
-
-Features
---------
-
-- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
-- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
-- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
-- Allow loading the `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
-- Add experimental support for Unix sockets. Contributed by Jason Little. ([\#15353](https://github.com/matrix-org/synapse/issues/15353))
-- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))
-
-
-Bugfixes
---------
-
-- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
-- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
-- Add a check to the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
-  to ensure that the SQLite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
-- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
-- Fix a long-standing bug where Synapse only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
-- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
-- Fix a bug in worker mode where, on a rolling restart of workers, the "typing" worker would consume 100% CPU until it was restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
-- Fix a long-standing bug where some `to_device` messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
-- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
-- Fix a missing `app` variable in the mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
-- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster-joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))
-
-
-Improved Documentation
----------------------
-
-- Fix a typo in the login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
-- Add some clarification to the docs/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
-- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))
-
-
-Internal Changes
----------------
-
-- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
-- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
-- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
-- Bump sentry-sdk from 1.15.0 to 1.17.0. ([\#15285](https://github.com/matrix-org/synapse/issues/15285))
-- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302))
-- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303))
-- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304))
-- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311))
-- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316))
-- Fix the `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319))
-- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324))
-- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325))
-- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326))
-- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327))
-- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328))
-- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329))
-- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330))
-- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334))
-- Speed up the pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339))
-- Speed up the sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340))
-- Fix the copyright year in the SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358))
-- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369))
-- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370))
-- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371))
-- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385))
-
-
-Synapse 1.80.0 (2023-03-28)
-===========================
-
-No significant changes since 1.80.0rc2.
-
-
-Synapse 1.80.0rc2 (2023-03-22)
-==============================
-
-Bugfixes
---------
-
-- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300))
-- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
-  would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301))
-
-
-Synapse 1.80.0rc1 (2023-03-21)
-==============================
-
-Features
---------
-
-- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `event_property_contains` push condition (see the sketch after this list). ([\#15187](https://github.com/matrix-org/synapse/issues/15187))
-- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249))
-- Allow loading the `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268))
-- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195))
-- Add topic and name events to the group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229))
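A hedged sketch of the stabilised condition (keys and values are made up): `event_property_contains` matches when the property at the dot-separated path is an array containing exactly the given value, complementing `event_property_is` (stabilised separately, below in 1.79.0rc1), which compares a property against an exact value.

```python
# Illustrative push rule conditions; keys and values are examples only.
conditions = [
    # Matches when content["m.keywords"] is an array containing "synapse".
    {
        "kind": "event_property_contains",
        "key": "content.m\\.keywords",  # literal dots escaped per MSC3873
        "value": "synapse",
    },
    # Matches when content["m.important"] is exactly true.
    {
        "kind": "event_property_is",
        "key": "content.m\\.important",
        "value": True,
    },
]
```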
-
-
-Bugfixes
---------
-
-- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756))
-- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots in them were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190))
-- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232))
-- Fix a long-standing error when sending a message into a deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235))
-
-
-Updates to the Docker image
---------------------------
-
-- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239))
-- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282))
-
-
-Improved Documentation
----------------------
-
-- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223))
-
-
-Internal Changes
----------------
-
-- Add additional functionality for declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921))
-- Add `Synapse-Trace-Id` to the `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974))
-- Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200))
-- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222))
-- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238))
-- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237))
-- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244))
-- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247))
-- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262))
-- Remove the unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266))
-- Reorganize the URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269))
-- Clean up the direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274))
-- Make the `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275))
-- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293))
-- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252))
-- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253))
-- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254))
-- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255))
-- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256))
-- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257))
-- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286))
-- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287))
-- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288))
-- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289))
-- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290))
-- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291))
-
-
-Synapse 1.79.0 (2023-03-14)
-===========================
-
-No significant changes since 1.79.0rc2.
-
-
-Synapse 1.79.0rc2 (2023-03-13)
-==============================
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register an `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
-- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))
-
-
-Internal Changes
----------------
-
-- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))
-
-
-Synapse 1.79.0rc1 (2023-03-07)
-==============================
-
-Features
---------
-
-- Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
-- Experimental support for [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967) to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077))
-- Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107))
-- Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). ([\#15116](https://github.com/matrix-org/synapse/issues/15116))
-- Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133))
-- Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134))
-- Update support for [MSC2677](https://github.com/matrix-org/matrix-spec-proposals/pull/2677): remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172))
-- Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): the `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185))
-
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869))
-- Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088))
-- Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092))
-- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163))
-- Add the `transaction_id` to the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174))
-- Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180))
-- Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193))
-- Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143))
-
-
-Updates to the Docker image
---------------------------
-
-- Improve startup logging in the with-workers Docker image. ([\#15186](https://github.com/matrix-org/synapse/issues/15186))
-
-
-Improved Documentation
----------------------
-
-- Document how to use caches in a module. ([\#14026](https://github.com/matrix-org/synapse/issues/14026))
-- Clarify which worker the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. ([\#15071](https://github.com/matrix-org/synapse/issues/15071))
-- Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. ([\#15112](https://github.com/matrix-org/synapse/issues/15112))
-- Correct the reference to `federation_verify_certificates` in the configuration documentation. ([\#15139](https://github.com/matrix-org/synapse/issues/15139))
-- Correct small documentation errors in some `MatrixFederationHttpClient` methods. ([\#15148](https://github.com/matrix-org/synapse/issues/15148))
-- Correct the description of the behavior of `registration_shared_secret_path` on startup. ([\#15168](https://github.com/matrix-org/synapse/issues/15168))
-
-
-Deprecations and Removals
-------------------------
-
-- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
-- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
-- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
-- Remove the undocumented and unspecced `type` parameter from the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
-- Remove the unspecced and buggy `PUT` method on the unstable `/rooms/<room_id>/batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199))
-
-
-Internal Changes
----------------
-
-- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14101](https://github.com/matrix-org/synapse/issues/14101))
-- Batch up storing state groups when creating a new room. ([\#14918](https://github.com/matrix-org/synapse/issues/14918))
-- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15051](https://github.com/matrix-org/synapse/issues/15051))
-- Refactor writing JSON data in `FileExfiltrationWriter`. ([\#15095](https://github.com/matrix-org/synapse/issues/15095))
-- Tighten the login ratelimit defaults. ([\#15135](https://github.com/matrix-org/synapse/issues/15135))
-- Fix a typo in an experimental config setting. ([\#15138](https://github.com/matrix-org/synapse/issues/15138))
-- Refactor the media modules. ([\#15146](https://github.com/matrix-org/synapse/issues/15146), [\#15175](https://github.com/matrix-org/synapse/issues/15175))
-- Improve type hints. ([\#15164](https://github.com/matrix-org/synapse/issues/15164))
-- Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. ([\#15165](https://github.com/matrix-org/synapse/issues/15165))
-- Remove a dangling reference to being a reference implementation from a docstring. ([\#15167](https://github.com/matrix-org/synapse/issues/15167))
-- Add an option to force a rebuild of the "editable" Complement image. ([\#15184](https://github.com/matrix-org/synapse/issues/15184))
-- Use nightly rustfmt in CI. ([\#15188](https://github.com/matrix-org/synapse/issues/15188))
-- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191))
-- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192))
-- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194))
-- Refactor the database transaction for querying users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215))
-- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
-
-<details><summary>Locked dependency updates</summary>
-
- - Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155))
- - Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103))
- - Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152))
- - Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154))
- - Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156))
- - Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159))
- - Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214))
- - Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209))
- - Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158))
- - Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211))
- - Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210))
- - Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213))
- - Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160))
- - Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212))
- - Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157))
-</details>
-
-
-Synapse 1.78.0 (2023-02-28)
-===========================
-
-Bugfixes
---------
-
-- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150))
-
-
-Synapse 1.78.0rc1 (2023-02-21)
-==============================
-
-Features
---------
-
-- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964))
-- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969))
-- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004))
-- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034))
-- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042))
-- Implement the experimental `exact_event_property_contains` push rule condition from [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966). ([\#15045](https://github.com/matrix-org/synapse/issues/15045))
-- Remove the spurious `dont_notify` action from the defaults for the `.m.rule.reaction` push rule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073))
-- Update the error code returned when a user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075))
-
-
-Bugfixes
---------
-
-- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779))
-- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605))
-- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038))
-- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068))
-- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try was not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074))
-- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079))
-- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080))
-- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. ([\#15096](https://github.com/matrix-org/synapse/issues/15096))
-- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108))
-
-
-Improved Documentation
----------------------
-
-- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892), [\#15022](https://github.com/matrix-org/synapse/issues/15022))
-- Update the delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959))
-- Fix a mistake in the `registration_shared_secret_path` docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078))
-- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083))
-
-
-Internal Changes
----------------
-
-- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755))
-- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606))
-- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675))
-- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742))
-- Tweak the comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834))
-- Prevent `WARNING: there is already a transaction in progress` lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840))
-- Use `StrCollection` to avoid potential bugs with `Collection[str]` (see the sketch after this list). ([\#14929](https://github.com/matrix-org/synapse/issues/14929))
-- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973))
-- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977))
-- Skip calculating unread push actions in `/sync` when `enable_push` is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980))
-- Add schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982))
-- Improve type hints. ([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084))
-- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037))
-- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040))
-- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041))
([\#15041](https://github.com/matrix-org/synapse/issues/15041)) -- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043)) -- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047)) -- Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053)) -- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054)) -- Faster joins: omit device list updates originating from partial state rooms in `/sync` responses without lazy loading of members enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069)) -- Fix a clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070)) -- Upper-bound the `frozendict` dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114)) -- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120)) - -<details><summary>Locked dependency updates</summary> - -- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059)) -- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020)) -- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033)) -- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060)) -- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061)) -- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062)) -- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063)) -- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064)) -- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065)) -- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099)) -- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100)) -- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101)) -- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102)) -- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104)) -- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105)) - - -</details> - - -Synapse 1.77.0 (2023-02-14) -=========================== - -No significant changes since 1.77.0rc2. - - -Synapse 1.77.0rc2 (2023-02-10) -============================== - -Bugfixes -------- - -- Fix a bug where retried replication requests would return a failure. Introduced in v1.76.0. 
([\#15024](https://github.com/matrix-org/synapse/issues/15024)) - - -Internal Changes ---------------- - -- Prepare for future database schema changes. ([\#15036](https://github.com/matrix-org/synapse/issues/15036)) - - -Synapse 1.77.0rc1 (2023-02-07) -============================== - -Features -------- - -- Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958)) -- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016)) -- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.77/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) -- Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962)) -- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971)) - - -Bugfixes -------- - -- Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866)) -- Fix a bug introduced in Synapse 1.35.0 where the module API's `send_local_online_presence_to` would fail to send presence updates over federation. ([\#14880](https://github.com/matrix-org/synapse/issues/14880)) -- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915)) -- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on SQLite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926)) -- Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942)) -- Fix a bug introduced in Synapse 1.64.0 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944)) -- Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947)) -- Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976)) -- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945)) - - -Internal Changes ---------------- - -- Add missing type hints. 
([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956), [\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007)) -- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922)) -- Allow running the Complement test suites with the asyncio reactor enabled. ([\#14858](https://github.com/matrix-org/synapse/issues/14858)) -- Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970)) -- Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916)) -- Fix a typo in the release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920)) -- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949), [\#15019](https://github.com/matrix-org/synapse/issues/15019)) -- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953)) -- Faster joins: Refactor internal handling of servers in a room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954)) -- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950)) -- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965)) -- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014)) -- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) - -<details><summary>Locked dependency updates</summary> - -- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968)) -- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952)) -- Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935)) -- Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. 
([\#14936](https://github.com/matrix-org/synapse/issues/14936)) -- Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937)) -- Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938)) -- Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939)) -- Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993)) -- Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994)) -- Bump prometheus-client from 0.15.0 to 0.16.0. ([\#14995](https://github.com/matrix-org/synapse/issues/14995)) -- Bump anyhow from 1.0.68 to 1.0.69. ([\#14996](https://github.com/matrix-org/synapse/issues/14996)) -- Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997)) -- Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998)) -- Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999)) -</details> - -Synapse 1.76.0 (2023-01-31) -=========================== - -The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#faster-joins-are-enabled-by-default) for more details (an illustrative config sketch follows the notes below). - -The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams). - -Those who are `poetry install`ing from source using our lockfile should ensure their Poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132). - - -Notes on faster joins --------------------- - -The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms. - -After a faster join, Synapse considers that room "partially joined". In this state, you should be able to - -- read incoming messages; -- see incoming state changes, e.g. room topic changes; and -- send messages, if the room is unencrypted. - -Synapse has to spend more effort to complete the join in the background. Once this finishes, you will be able to - -- send messages, if the room is encrypted; -- retrieve room history from before your join, if permitted by the room settings; and -- access the full list of room members.
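For context, an editorial sketch (not part of the original changelog): the opt-out mentioned above lives under `experimental_features` in `homeserver.yaml`. The option name below is assumed from the linked upgrade notes, which remain the authoritative reference.

```yaml
# homeserver.yaml -- illustrative sketch only; consult the 1.76 upgrade
# notes for the authoritative option name and default.
experimental_features:
  # Assumed flag: opts this server out of the faster-joins behaviour
  # that 1.76 enables by default.
  faster_joins: false
```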
 - - -Improved Documentation ---------------------- - -- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677)) - - -Synapse 1.76.0rc2 (2023-01-27) -============================== - -Bugfixes -------- - -- Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. ([\#14914](https://github.com/matrix-org/synapse/issues/14914)) - - -Internal Changes ---------------- - -- Faster joins: Improve performance of looking up partial-state status of rooms. ([\#14917](https://github.com/matrix-org/synapse/issues/14917)) - - -Synapse 1.76.0rc1 (2023-01-25) -============================== - -Features -------- - -- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111)) -- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629)) -- Add a dedicated listener configuration for the `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747)) -- Implement support for [MSC3890](https://github.com/matrix-org/matrix-spec-proposals/pull/3890): Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775)) -- Implement experimental support for [MSC3930](https://github.com/matrix-org/matrix-spec-proposals/pull/3930): Push rules for ([MSC3381](https://github.com/matrix-org/matrix-spec-proposals/pull/3381)) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787)) -- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811)) -- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839)) -- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870)) -- Faster joins: request partial joins by default. Admins can opt out of this for the time being---see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905)) - - -Bugfixes -------- - -- Add an index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799)) -- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconnected early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842)) -- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820)) -- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864)) -- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873)) -- Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874)) -- Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. 
([\#14882](https://github.com/matrix-org/synapse/issues/14882)) -- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on SQLite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910)) - - -Updates to the Docker image --------------------------- - -- Bump default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875)) - - -Improved Documentation ---------------------- - -- Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667)) -- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773)) -- Add missing documentation for `tag` to the `listeners` section. ([\#14803](https://github.com/matrix-org/synapse/issues/14803)) -- Update documentation in the configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818)) -- Add `worker_manhole` to the configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824)) -- Fix the example config in the [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html), which was missing the `id` field. ([\#14845](https://github.com/matrix-org/synapse/issues/14845)) -- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868)) -- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883)) - - -Deprecations and Removals ------------------------- - -- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860)) - - -Internal Changes ---------------- - -- Faster remote room joins (worker mode): do not populate the external hosts-in-room cache when sending events, as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749)) -- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752)) -- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804)) -- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807)) -- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889)) -- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819)) -- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821)) -- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822)) -- Drop unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825)) -- Merge the two account data and the two device list replication streams. 
([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833)) -- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841)) -- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843)) -- Add a check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844)) -- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855)) -- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872)) -- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877)) -- Reduce the maximum time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881)) -- Faster joins: allow the resync process more time to fetch `/state` IDs. ([\#14912](https://github.com/matrix-org/synapse/issues/14912)) -- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848)) -- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861)) -- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862)) -- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863)) -- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896)) -- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897)) -- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899)) -- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900)) -- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901)) - - -Synapse 1.75.0 (2023-01-17) -=========================== - -No significant changes since 1.75.0rc2. - - -Synapse 1.75.0rc2 (2023-01-12) -============================== - -Bugfixes -------- - -- Fix a bug introduced in Synapse 1.75.0rc1 where device lists could be miscalculated with some sync filters. ([\#14810](https://github.com/matrix-org/synapse/issues/14810)) -- Fix a race where calling `/members` or `/state` with an `at` parameter could fail for newly created rooms when using multiple workers. ([\#14817](https://github.com/matrix-org/synapse/issues/14817)) - - -Synapse 1.75.0rc1 (2023-01-10) -============================== - -Features -------- - -- Add a `cached` function to `synapse.module_api` that returns a decorator to cache return values of functions. ([\#14663](https://github.com/matrix-org/synapse/issues/14663)) -- Add experimental support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) (removing account data). ([\#14714](https://github.com/matrix-org/synapse/issues/14714)) -- Support [RFC7636](https://datatracker.ietf.org/doc/html/rfc7636) Proof Key for Code Exchange for OAuth single sign-on. 
([\#14750](https://github.com/matrix-org/synapse/issues/14750)) -- Support non-OpenID compliant userinfo claims for subject and picture. ([\#14753](https://github.com/matrix-org/synapse/issues/14753)) -- Improve performance of `/sync` when filtering all rooms, message types, or senders. ([\#14786](https://github.com/matrix-org/synapse/issues/14786)) -- Improve performance of the `/hierarchy` endpoint. ([\#14263](https://github.com/matrix-org/synapse/issues/14263)) - - -Bugfixes -------- - -- Fix the *MAU Limits* section of the Grafana dashboard relying on a specific `job` name for the workers of a Synapse deployment. ([\#14644](https://github.com/matrix-org/synapse/issues/14644)) -- Fix a bug introduced in Synapse 1.70.0 which could cause spurious `UNIQUE constraint failed` errors in the `rotate_notifs` background job. ([\#14669](https://github.com/matrix-org/synapse/issues/14669)) -- Ensure stream IDs are always updated after caches get invalidated with workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14723](https://github.com/matrix-org/synapse/issues/14723)) -- Remove the unspecced `device` field from `/pushrules` responses. ([\#14727](https://github.com/matrix-org/synapse/issues/14727)) -- Fix a bug introduced in Synapse 1.73.0 where the `picture_claim` configured under `oidc_providers` was unused (the default value of `"picture"` was used instead). ([\#14751](https://github.com/matrix-org/synapse/issues/14751)) -- Unescape HTML entities in URL preview titles making use of oEmbed responses. ([\#14781](https://github.com/matrix-org/synapse/issues/14781)) -- Disable sending confirmation email when 3PID is disabled. ([\#14725](https://github.com/matrix-org/synapse/issues/14725)) - - -Improved Documentation ---------------------- - -- Declare support for Python 3.11. ([\#14673](https://github.com/matrix-org/synapse/issues/14673)) -- Fix `target_memory_usage` being used in the description for the actual `cache_autotune` sub-option `target_cache_memory_usage`. ([\#14674](https://github.com/matrix-org/synapse/issues/14674)) -- Move `email` to the Server section of the config file documentation. ([\#14730](https://github.com/matrix-org/synapse/issues/14730)) -- Fix broken links in the Synapse documentation. ([\#14744](https://github.com/matrix-org/synapse/issues/14744)) -- Add missing worker settings to the shared configuration documentation. ([\#14748](https://github.com/matrix-org/synapse/issues/14748)) -- Document using Twitter as an OAuth 2.0 authentication provider. ([\#14778](https://github.com/matrix-org/synapse/issues/14778)) -- Fix the Synapse 1.74 upgrade notes to correctly explain how to install pyICU when installing Synapse from PyPI. ([\#14797](https://github.com/matrix-org/synapse/issues/14797)) -- Update the link to towncrier in the contribution guide. ([\#14801](https://github.com/matrix-org/synapse/issues/14801)) -- Use `htmltest` to check links in the Synapse documentation. ([\#14743](https://github.com/matrix-org/synapse/issues/14743)) - - -Internal Changes ---------------- - -- Faster remote room joins: stream the un-partial-stating of events over replication. ([\#14545](https://github.com/matrix-org/synapse/issues/14545), [\#14546](https://github.com/matrix-org/synapse/issues/14546)) -- Use [ruff](https://github.com/charliermarsh/ruff/) instead of flake8. 
([\#14633](https://github.com/matrix-org/synapse/issues/14633), [\#14741](https://github.com/matrix-org/synapse/issues/14741)) -- Change `handle_new_client_event` signature so that a 429 does not reach clients on `PartialStateConflictError`, and internally retry when needed instead. ([\#14665](https://github.com/matrix-org/synapse/issues/14665)) -- Remove dependency on jQuery on the reCAPTCHA page. ([\#14672](https://github.com/matrix-org/synapse/issues/14672)) -- Faster joins: make `compute_state_after_events` consistent with other state-fetching functions that take a `StateFilter`. ([\#14676](https://github.com/matrix-org/synapse/issues/14676)) -- Add missing type hints. ([\#14680](https://github.com/matrix-org/synapse/issues/14680), [\#14681](https://github.com/matrix-org/synapse/issues/14681), [\#14687](https://github.com/matrix-org/synapse/issues/14687)) -- Improve type annotations for the helper methods on a `CachedFunction`. ([\#14685](https://github.com/matrix-org/synapse/issues/14685)) -- Check that the SQLite database file exists before porting to PostgreSQL. ([\#14692](https://github.com/matrix-org/synapse/issues/14692)) -- Add `.direnv/` directory to `.gitignore` to prevent local state generated by the [direnv](https://direnv.net/) development tool from being committed. ([\#14707](https://github.com/matrix-org/synapse/issues/14707)) -- Batch up replication requests to request the resyncing of remote users' devices. ([\#14716](https://github.com/matrix-org/synapse/issues/14716)) -- If debug logging is enabled, log the `msgid`s of any to-device messages that are returned over `/sync`. ([\#14724](https://github.com/matrix-org/synapse/issues/14724)) -- Change the GHA CI job to follow best practices. ([\#14772](https://github.com/matrix-org/synapse/issues/14772)) -- Switch to our fork of `dh-virtualenv` to work around an upstream Python 3.11 incompatibility. ([\#14774](https://github.com/matrix-org/synapse/issues/14774)) -- Skip testing built wheels for PyPy 3.7 on Linux x86_64 as we lack new required dependencies in the build environment. ([\#14802](https://github.com/matrix-org/synapse/issues/14802)) - -### Dependabot updates - -<details> - -- Bump JasonEtco/create-an-issue from 2.8.1 to 2.8.2. ([\#14693](https://github.com/matrix-org/synapse/issues/14693)) -- Bump anyhow from 1.0.66 to 1.0.68. ([\#14694](https://github.com/matrix-org/synapse/issues/14694)) -- Bump blake2 from 0.10.5 to 0.10.6. ([\#14695](https://github.com/matrix-org/synapse/issues/14695)) -- Bump serde_json from 1.0.89 to 1.0.91. ([\#14696](https://github.com/matrix-org/synapse/issues/14696)) -- Bump serde from 1.0.150 to 1.0.151. ([\#14697](https://github.com/matrix-org/synapse/issues/14697)) -- Bump lxml from 4.9.1 to 4.9.2. ([\#14698](https://github.com/matrix-org/synapse/issues/14698)) -- Bump types-jsonschema from 4.17.0.1 to 4.17.0.2. ([\#14700](https://github.com/matrix-org/synapse/issues/14700)) -- Bump sentry-sdk from 1.11.1 to 1.12.0. ([\#14701](https://github.com/matrix-org/synapse/issues/14701)) -- Bump types-setuptools from 65.6.0.1 to 65.6.0.2. ([\#14702](https://github.com/matrix-org/synapse/issues/14702)) -- Bump minimum PyYAML to 3.13. ([\#14720](https://github.com/matrix-org/synapse/issues/14720)) -- Bump JasonEtco/create-an-issue from 2.8.2 to 2.9.1. ([\#14731](https://github.com/matrix-org/synapse/issues/14731)) -- Bump towncrier from 22.8.0 to 22.12.0. ([\#14732](https://github.com/matrix-org/synapse/issues/14732)) -- Bump isort from 5.10.1 to 5.11.4. 
([\#14733](https://github.com/matrix-org/synapse/issues/14733)) -- Bump attrs from 22.1.0 to 22.2.0. ([\#14734](https://github.com/matrix-org/synapse/issues/14734)) -- Bump black from 22.10.0 to 22.12.0. ([\#14735](https://github.com/matrix-org/synapse/issues/14735)) -- Bump sentry-sdk from 1.12.0 to 1.12.1. ([\#14736](https://github.com/matrix-org/synapse/issues/14736)) -- Bump setuptools from 65.3.0 to 65.5.1. ([\#14738](https://github.com/matrix-org/synapse/issues/14738)) -- Bump serde from 1.0.151 to 1.0.152. ([\#14758](https://github.com/matrix-org/synapse/issues/14758)) -- Bump ruff from 0.0.189 to 0.0.206. ([\#14759](https://github.com/matrix-org/synapse/issues/14759)) -- Bump pydantic from 1.10.2 to 1.10.4. ([\#14760](https://github.com/matrix-org/synapse/issues/14760)) -- Bump gitpython from 3.1.29 to 3.1.30. ([\#14761](https://github.com/matrix-org/synapse/issues/14761)) -- Bump pillow from 9.3.0 to 9.4.0. ([\#14762](https://github.com/matrix-org/synapse/issues/14762)) -- Bump types-requests from 2.28.11.5 to 2.28.11.7. ([\#14763](https://github.com/matrix-org/synapse/issues/14763)) -- Bump dawidd6/action-download-artifact from 2.24.2 to 2.24.3. ([\#14779](https://github.com/matrix-org/synapse/issues/14779)) -- Bump peaceiris/actions-gh-pages from 3.9.0 to 3.9.1. ([\#14791](https://github.com/matrix-org/synapse/issues/14791)) -- Bump types-pillow from 9.3.0.4 to 9.4.0.0. ([\#14792](https://github.com/matrix-org/synapse/issues/14792)) -- Bump pyopenssl from 22.1.0 to 23.0.0. ([\#14793](https://github.com/matrix-org/synapse/issues/14793)) -- Bump types-setuptools from 65.6.0.2 to 65.6.0.3. ([\#14794](https://github.com/matrix-org/synapse/issues/14794)) -- Bump importlib-metadata from 4.2.0 to 6.0.0. ([\#14795](https://github.com/matrix-org/synapse/issues/14795)) -- Bump ruff from 0.0.206 to 0.0.215. ([\#14796](https://github.com/matrix-org/synapse/issues/14796)) -</details> +* Bump anyhow from 1.0.93 to 1.0.95. ([\#18012](https://github.com/element-hq/synapse/issues/18012), [\#18045](https://github.com/element-hq/synapse/issues/18045)) +* Bump authlib from 1.3.2 to 1.4.0. ([\#18048](https://github.com/element-hq/synapse/issues/18048)) +* Bump dawidd6/action-download-artifact from 6 to 7. ([\#17981](https://github.com/element-hq/synapse/issues/17981)) +* Bump http from 1.1.0 to 1.2.0. ([\#18013](https://github.com/element-hq/synapse/issues/18013)) +* Bump mypy from 1.11.2 to 1.12.1. ([\#17999](https://github.com/element-hq/synapse/issues/17999)) +* Bump mypy-zope from 1.0.8 to 1.0.9. ([\#18047](https://github.com/element-hq/synapse/issues/18047)) +* Bump pillow from 10.4.0 to 11.0.0. ([\#18015](https://github.com/element-hq/synapse/issues/18015)) +* Bump pydantic from 2.9.2 to 2.10.3. ([\#18014](https://github.com/element-hq/synapse/issues/18014)) +* Bump pyicu from 2.13.1 to 2.14. ([\#18060](https://github.com/element-hq/synapse/issues/18060)) +* Bump pyo3 from 0.23.2 to 0.23.3. ([\#18001](https://github.com/element-hq/synapse/issues/18001)) +* Bump python-multipart from 0.0.16 to 0.0.18. ([\#17985](https://github.com/element-hq/synapse/issues/17985)) +* Bump sentry-sdk from 2.17.0 to 2.19.2. ([\#18061](https://github.com/element-hq/synapse/issues/18061)) +* Bump serde from 1.0.215 to 1.0.217. ([\#18031](https://github.com/element-hq/synapse/issues/18031), [\#18059](https://github.com/element-hq/synapse/issues/18059)) +* Bump serde_json from 1.0.133 to 1.0.134. ([\#18044](https://github.com/element-hq/synapse/issues/18044)) +* Bump twine from 5.1.1 to 6.0.1. 
([\#18049](https://github.com/element-hq/synapse/issues/18049)) **Changelogs for older versions can be found [here](docs/changelogs/).** diff --git a/Cargo.lock b/Cargo.lock
index ce5520436d..980dff6987 100644 --- a/Cargo.lock +++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "arc-swap" @@ -37,9 +37,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "blake2" @@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.7.1" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cfg-if" @@ -125,15 +125,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" dependencies = [ "cfg-if", - "js-sys", "libc", "wasi", - "wasm-bindgen", + "windows-targets", ] [[package]] @@ -162,9 +161,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hex" @@ -174,9 +173,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "1.1.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -223,20 +222,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] -name = "lock_api" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] name = "log" -version = "0.4.22" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "memchr" @@ -266,29 +255,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] -name = "parking_lot" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" -dependencies = [ - "lock_api", - "parking_lot_core", -] - 
-[[package]] -name = "parking_lot_core" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] name = "portable-atomic" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -302,25 +268,25 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] [[package]] name = "pyo3" -version = "0.21.2" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" +checksum = "e5203598f366b11a02b13aa20cab591229ff0a89fd121a308a5df751d5fc9219" dependencies = [ "anyhow", "cfg-if", "indoc", "libc", "memoffset", - "parking_lot", + "once_cell", "portable-atomic", "pyo3-build-config", "pyo3-ffi", @@ -330,9 +296,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.21.2" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50" +checksum = "99636d423fa2ca130fa5acde3059308006d46f98caac629418e53f7ebb1e9999" dependencies = [ "once_cell", "target-lexicon", @@ -340,9 +306,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.21.2" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" +checksum = "78f9cf92ba9c409279bc3305b5409d90db2d2c22392d443a87df3a1adad59e33" dependencies = [ "libc", "pyo3-build-config", @@ -350,9 +316,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.10.0" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c" +checksum = "45192e5e4a4d2505587e27806c7b710c231c40c56f3bfc19535d0bb25df52264" dependencies = [ "arc-swap", "log", @@ -361,9 +327,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.21.2" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" +checksum = "0b999cb1a6ce21f9a6b147dcf1be9ffedf02e0043aec74dc390f3007047cecd9" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -373,9 +339,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.21.2" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" +checksum = "822ece1c7e1012745607d5cf0bcb2874769f0f7cb34c4cde03b9358eb9ef911a" dependencies = [ "heck", "proc-macro2", @@ -386,9 +352,9 @@ dependencies = [ [[package]] name = "pythonize" -version = "0.21.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c" +checksum = "d5bcac0d0b71821f0d69e42654f1e15e5c94b85196446c4de9588951a2117e7b" dependencies = [ "pyo3", 
"serde", @@ -405,20 +371,20 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.5" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ - "libc", "rand_chacha", "rand_core", + "zerocopy", ] [[package]] name = "rand_chacha" -version = "0.3.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", "rand_core", @@ -426,27 +392,19 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" dependencies = [ "getrandom", -] - -[[package]] -name = "redox_syscall" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" -dependencies = [ - "bitflags", + "zerocopy", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -456,9 +414,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -467,9 +425,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "ryu" @@ -478,25 +436,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] name = "serde" -version = "1.0.204" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", @@ -505,9 +457,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.122" 
+version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -528,9 +480,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -538,12 +490,6 @@ dependencies = [ ] [[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" - -[[package]] name = "subtle" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -551,9 +497,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "2.0.61" +version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ "proc-macro2", "quote", @@ -586,9 +532,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.14" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" +checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" [[package]] name = "typenum" @@ -598,11 +544,10 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ulid" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f903f293d11f31c0c29e4148f6dc0d033a7f80cebc0282bea147611667d289" +checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe" dependencies = [ - "getrandom", "rand", "web-time", ] @@ -627,9 +572,12 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.13.3+wasi-0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] [[package]] name = "wasm-bindgen" @@ -697,9 +645,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -713,48 +661,77 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = 
"0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + +[[package]] +name = "zerocopy" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa91407dacce3a68c56de03abe2760159582b846c6a4acd2f456618087f12713" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06718a168365cad3d5ff0bb133aad346959a2074bd4a85c121255a11304a8626" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/LICENSE b/LICENSE-AGPL-3.0
index be3f7b28e5..be3f7b28e5 100644 --- a/LICENSE +++ b/LICENSE-AGPL-3.0
diff --git a/LICENSE-COMMERCIAL b/LICENSE-COMMERCIAL new file mode 100644
index 0000000000..173e03e0c0 --- /dev/null +++ b/LICENSE-COMMERCIAL
@@ -0,0 +1,6 @@ +Licensees holding a valid commercial license with Element may use this +software in accordance with the terms contained in a written agreement +between you and Element. + +To purchase a commercial license please contact our sales team at +licensing@element.io diff --git a/README.rst b/README.rst
index d5625afe8f..8974990ed1 100644 --- a/README.rst +++ b/README.rst
@@ -10,14 +10,15 @@ implementation, written and maintained by `Element <https://element.io>`_. `Matrix <https://github.com/matrix-org>`__ is the open standard for secure and interoperable real time communications. You can directly run and manage the source code in this repository, available under an AGPL -license. There is no support provided from Element unless you have a -subscription. +license (or alternatively under a commercial license from Element). +There is no support provided by Element unless you have a +subscription from Element. -Subscription alternative -======================== +Subscription +============ -Alternatively, for those that need an enterprise-ready solution, Element -Server Suite (ESS) is `available as a subscription <https://element.io/pricing>`_. +For those that need an enterprise-ready solution, Element +Server Suite (ESS) is `available via subscription <https://element.io/pricing>`_. ESS builds on Synapse to offer a complete Matrix-based backend including the full `Admin Console product <https://element.io/enterprise-functionality/admin-console>`_, giving admins the power to easily manage an organization-wide @@ -158,7 +159,7 @@ it: We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to the public internet. Without it, anyone can freely register accounts on your homeserver. -This can be exploited by attackers to create spambots targetting the rest of the Matrix +This can be exploited by attackers to create spambots targeting the rest of the Matrix federation. Your new user name will be formed partly from the ``server_name``, and partly @@ -249,6 +250,22 @@ Developers might be particularly interested in: Alongside all that, join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix.to/#/#synapse-dev:matrix.org>`_, featuring real humans! +Copyright and Licensing +======================= + +| Copyright 2014-2017 OpenMarket Ltd +| Copyright 2017 Vector Creations Ltd +| Copyright 2017-2025 New Vector Ltd +| + +This software is dual-licensed by New Vector Ltd (Element). It can be used either: + +(1) for free under the terms of the GNU Affero General Public License (as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version); OR + +(2) under the terms of a paid-for Element Commercial License agreement between you and Element (the terms of which may vary depending on what you and Element have agreed to). + +Unless required by applicable law or agreed to in writing, software distributed under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the specific language governing permissions and limitations under the Licenses. + .. |support| image:: https://img.shields.io/badge/matrix-community%20support-success :alt: (get community support in #synapse:matrix.org) diff --git a/build_rust.py b/build_rust.py
index 662474dcb4..d2726cee26 100644 --- a/build_rust.py +++ b/build_rust.py
@@ -1,8 +1,10 @@ # A build script for poetry that adds the rust extension. +import itertools import os from typing import Any, Dict +from packaging.specifiers import SpecifierSet from setuptools_rust import Binding, RustExtension @@ -14,6 +16,8 @@ def build(setup_kwargs: Dict[str, Any]) -> None: target="synapse.synapse_rust", path=cargo_toml_path, binding=Binding.PyO3, + # This flag is a no-op in the latest versions. Instead, we need to + # specify this in the `bdist_wheel` config below. py_limited_api=True, # We force always building in release mode, as we can't tell the # difference between using `poetry` in development vs production. @@ -21,3 +25,18 @@ ) setup_kwargs.setdefault("rust_extensions", []).append(extension) setup_kwargs["zip_safe"] = False + + # We look up the minimum supported Python version by looking at + # `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first Python + # version that matches. We then convert that into the `py_limited_api` form, + # e.g. cp39 for Python 3.9. + py_limited_api: str + python_bounds = SpecifierSet(setup_kwargs["python_requires"]) + for minor_version in itertools.count(start=8): + if f"3.{minor_version}.0" in python_bounds: + py_limited_api = f"cp3{minor_version}" + break + + setup_kwargs.setdefault("options", {}).setdefault("bdist_wheel", {})[ + "py_limited_api" + ] = py_limited_api
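Not part of the diff, but for illustration: the hunk above leans on `packaging`'s `SpecifierSet`, whose membership test accepts plain version strings. A minimal standalone sketch of the same probing loop (the `">=3.9.0,<4.0.0"` bound is an illustrative value, not necessarily the project's real `python_requires`):

```python
# Probe successive 3.x patch-zero versions against a python_requires bound
# to find the lowest supported minor version, then map it to a stable-ABI
# tag such as "cp39".
from itertools import count

from packaging.specifiers import SpecifierSet

python_bounds = SpecifierSet(">=3.9.0,<4.0.0")  # illustrative bound
for minor_version in count(start=8):
    if f"3.{minor_version}.0" in python_bounds:
        print(f"py_limited_api tag: cp3{minor_version}")  # -> cp39
        break
```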
index c97a802dbf..0000000000 --- a/changelog.d/17483.bugfix +++ /dev/null
@@ -1 +0,0 @@ -Start handlers for new media endpoints when media resource configured. diff --git a/changelog.d/17510.bugfix b/changelog.d/17510.bugfix deleted file mode 100644
index 3170c284bd..0000000000 --- a/changelog.d/17510.bugfix +++ /dev/null
@@ -1 +0,0 @@ -Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17514.misc b/changelog.d/17514.misc deleted file mode 100644
index fc3cc37915..0000000000 --- a/changelog.d/17514.misc +++ /dev/null
@@ -1 +0,0 @@ -Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. diff --git a/changelog.d/17515.doc b/changelog.d/17515.doc deleted file mode 100644
index c2dbe24e9d..0000000000 --- a/changelog.d/17515.doc +++ /dev/null
@@ -1,3 +0,0 @@ -Clarify default behaviour of the -[`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites) -option. \ No newline at end of file diff --git a/changelog.d/17531.misc b/changelog.d/17531.misc deleted file mode 100644
index 25b7b36a72..0000000000 --- a/changelog.d/17531.misc +++ /dev/null
@@ -1 +0,0 @@ -Fixup comment in sliding sync implementation. diff --git a/changelog.d/17535.bugfix b/changelog.d/17535.bugfix deleted file mode 100644
index c5b5da0485..0000000000 --- a/changelog.d/17535.bugfix +++ /dev/null
@@ -1 +0,0 @@ -Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. diff --git a/changelog.d/17536.misc b/changelog.d/17536.misc deleted file mode 100644
index 116ef0c36d..0000000000 --- a/changelog.d/17536.misc +++ /dev/null
@@ -1 +0,0 @@ -Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. \ No newline at end of file diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index d4ddeb4dc7..9b5d33d2b1 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py
@@ -21,7 +21,8 @@ # # -""" Starts a synapse client console. """ +"""Starts a synapse client console.""" + import argparse import binascii import cmd @@ -244,7 +245,7 @@ class SynapseCmd(cmd.Cmd): if "flows" not in json_res: print("Failed to find any login flows.") - defer.returnValue(False) + return False flow = json_res["flows"][0] # assume first is the one we want. if "type" not in flow or "m.login.password" != flow["type"] or "stages" in flow: @@ -253,8 +254,8 @@ class SynapseCmd(cmd.Cmd): "Unable to login via the command line client. Please visit " "%s to login." % fallback_url ) - defer.returnValue(False) - defer.returnValue(True) + return False + return True def do_emailrequest(self, line): """Requests the association of a third party identifier diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
index e6a10b5f32..54363e4259 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py
@@ -78,7 +78,7 @@ class TwistedHttpClient(HttpClient): url, data, headers_dict={"Content-Type": ["application/json"]} ) body = yield readBody(response) - defer.returnValue((response.code, body)) + return response.code, body @defer.inlineCallbacks def get_json(self, url, args=None): @@ -88,7 +88,7 @@ class TwistedHttpClient(HttpClient): url = "%s?%s" % (url, qs) response = yield self._create_get_request(url) body = yield readBody(response) - defer.returnValue(json.loads(body)) + return json.loads(body) def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None): """Wrapper of _create_request to issue a PUT request""" @@ -134,7 +134,7 @@ class TwistedHttpClient(HttpClient): response = yield self._create_request(method, url) body = yield readBody(response) - defer.returnValue(json.loads(body)) + return json.loads(body) @defer.inlineCallbacks def _create_request( @@ -173,7 +173,7 @@ class TwistedHttpClient(HttpClient): if self.verbose: print("Status %s %s" % (response.code, response.phrase)) print(pformat(list(response.headers.getAllRawHeaders()))) - defer.returnValue(response) + return response def sleep(self, seconds): d = defer.Deferred() diff --git a/contrib/docker/README.md b/contrib/docker/README.md
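Background for the `defer.returnValue` removals in the two files above: since PEP 380 (Python 3.3), a generator may `return value` directly, and Twisted's `@defer.inlineCallbacks` treats that exactly like `defer.returnValue(value)`, so the modern spelling is a plain return. A minimal sketch; `fetch_status` and its `agent` argument are hypothetical names, not code from this repository:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_status(agent, url):
        # 'agent' stands in for any object whose methods return a Deferred.
        response = yield agent.request(b"GET", url)
        # Old style: defer.returnValue(response.code)
        return response.code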
index 89c1518bd0..fdfa96795a 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md
@@ -30,3 +30,6 @@ docker-compose up -d ### More information For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md) + +**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see +https://github.com/element-hq/element-docker-demo** \ No newline at end of file diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 36d5fd5309..9dffc852fd 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml
@@ -51,7 +51,7 @@ services: - traefik.http.routers.https-synapse.tls.certResolver=le-ssl db: - image: docker.io/postgres:12-alpine + image: docker.io/postgres:15-alpine # Change that password, of course! environment: - POSTGRES_USER=synapse diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
index 81518f6ba1..16c8c26795 100644 --- a/contrib/docker_compose_workers/README.md +++ b/contrib/docker_compose_workers/README.md
@@ -8,6 +8,9 @@ All examples and snippets assume that your Synapse service is called `synapse` i An example Docker Compose file can be found [here](docker-compose.yaml). +**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this +docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo** + ## Worker Service Examples in Docker Compose In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`). diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json
index 30d6d87500..62b58a199d 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json
@@ -220,29 +220,24 @@ "yBucketBound": "auto" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { - "uid": "${DS_PROMETHEUS}" + "uid": "${DS_PROMETHEUS}", + "type": "prometheus" }, - "description": "", + "aliasColors": {}, + "dashLength": 10, "fieldConfig": { "defaults": { "links": [] }, "overrides": [] }, - "fill": 0, - "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, "y": 1 }, - "hiddenSeries": false, "id": 152, "legend": { "avg": false, @@ -255,71 +250,81 @@ "values": false }, "lines": true, - "linewidth": 0, - "links": [], "nullPointMode": "connected", "options": { "alertThreshold": true }, "paceLength": 10, - "percentage": false, - "pluginVersion": "9.2.2", + "pluginVersion": "10.4.3", "pointradius": 5, - "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": "Avg", "fill": 0, - "linewidth": 3 + "linewidth": 3, + "$$hashKey": "object:48" }, { "alias": "99%", "color": "#C4162A", - "fillBelowTo": "90%" + "fillBelowTo": "90%", + "$$hashKey": "object:49" }, { "alias": "90%", "color": "#FF7383", - "fillBelowTo": "75%" + "fillBelowTo": "75%", + "$$hashKey": "object:50" }, { "alias": "75%", "color": "#FFEE52", - "fillBelowTo": "50%" + "fillBelowTo": "50%", + "$$hashKey": "object:51" }, { "alias": "50%", "color": "#73BF69", - "fillBelowTo": "25%" + "fillBelowTo": "25%", + "$$hashKey": "object:52" }, { "alias": "25%", "color": "#1F60C4", - "fillBelowTo": "5%" + "fillBelowTo": "5%", + "$$hashKey": "object:53" }, { "alias": "5%", - "lines": false + "lines": false, + "$$hashKey": "object:54" }, { "alias": "Average", "color": "rgb(255, 255, 255)", "lines": true, - "linewidth": 3 + "linewidth": 3, + "$$hashKey": "object:55" }, { - "alias": "Events", + "alias": "Local events being persisted", + "color": "#96d98D", + "points": true, + "yaxis": 2, + "zindex": -3, + "$$hashKey": "object:56" + }, + { + "$$hashKey": "object:329", "color": "#B877D9", - "hideTooltip": true, + "alias": "All events being persisted", "points": true, "yaxis": 2, "zindex": -3 } ], "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { @@ -384,7 +389,20 @@ }, "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))", "legendFormat": "Average", - "refId": "H" + "refId": "H", + "editorMode": "code", + "range": true + }, + { + "datasource": { + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))", + "hide": false, + "instant": false, + "legendFormat": "Local events being persisted", + "refId": "E", + "editorMode": "code" }, { "datasource": { @@ -393,8 +411,9 @@ "expr": "sum(rate(synapse_storage_events_persisted_events_total{instance=\"$instance\"}[$bucket_size]))", "hide": false, "instant": false, - "legendFormat": "Events", - "refId": "E" + "legendFormat": "All events being persisted", + "refId": "I", + "editorMode": "code" } ], "thresholds": [ @@ -428,7 +447,9 @@ "xaxis": { "mode": "time", "show": true, - "values": [] + "values": [], + "name": null, + "buckets": null }, "yaxes": [ { @@ -450,7 +471,20 @@ ], "yaxis": { "align": false - } + }, + "bars": false, + "dashes": false, + "description": "", + 
"fill": 0, + "fillGradient": 0, + "hiddenSeries": false, + "linewidth": 0, + "percentage": false, + "points": false, + "stack": false, + "steppedLine": false, + "timeFrom": null, + "timeShift": null }, { "aliasColors": {}, diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
index 779590768f..1d74fee822 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py
@@ -20,8 +20,8 @@ # import argparse -import cgi import datetime +import html import json import urllib.request from typing import List @@ -85,7 +85,7 @@ def make_graph(pdus: List[dict], filename_prefix: str) -> None: "name": name, "type": pdu.get("pdu_type"), "state_key": pdu.get("state_key"), - "content": cgi.escape(json.dumps(pdu.get("content")), quote=True), + "content": html.escape(json.dumps(pdu.get("content")), quote=True), "time": t, "depth": pdu.get("depth"), } diff --git a/contrib/vertobot/.gitignore b/contrib/vertobot/.gitignore deleted file mode 100644
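The `cgi.escape` to `html.escape` change above tracks the standard library: `cgi.escape` was deprecated and finally removed in Python 3.8, with `html.escape` as its replacement. One behavioural note: `html.escape` quotes double-quote characters by default (`cgi.escape` did not), so passing `quote=True` here simply preserves the previous explicit behaviour. A quick sketch:

    import html
    import json

    content = {"body": '<b>"hi" & bye</b>'}
    print(html.escape(json.dumps(content), quote=True))
    # {&quot;body&quot;: &quot;&lt;b&gt;\&quot;hi\&quot; &amp; bye&lt;/b&gt;&quot;}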
index 071a780574..0000000000 --- a/contrib/vertobot/.gitignore +++ /dev/null
@@ -1,2 +0,0 @@ -vucbot.yaml -vertobot.yaml diff --git a/contrib/vertobot/bot.pl b/contrib/vertobot/bot.pl deleted file mode 100755
index 31eed40925..0000000000 --- a/contrib/vertobot/bot.pl +++ /dev/null
@@ -1,309 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; -use 5.010; # // -use IO::Socket::SSL qw(SSL_VERIFY_NONE); -use IO::Async::Loop; -use Net::Async::WebSocket::Client; -use Net::Async::Matrix 0.11_002; -use JSON; -use YAML; -use Data::UUID; -use Getopt::Long; -use Data::Dumper; - -binmode STDOUT, ":encoding(UTF-8)"; -binmode STDERR, ":encoding(UTF-8)"; - -my $loop = IO::Async::Loop->new; -# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See -# https://rt.cpan.org/Ticket/Display.html?id=93107 -ref $loop eq "IO::Async::Loop::Poll" and - warn "Using SSL with IO::Poll causes known memory-leaks!!\n"; - -GetOptions( - 'C|config=s' => \my $CONFIG, - 'eval-from=s' => \my $EVAL_FROM, -) or exit 1; - -if( defined $EVAL_FROM ) { - # An emergency 'eval() this file' hack - $SIG{HUP} = sub { - my $code = do { - open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return; - local $/; <$fh> - }; - - eval $code or warn "Cannot eval() - $@"; - }; -} - -defined $CONFIG or die "Must supply --config\n"; - -my %CONFIG = %{ YAML::LoadFile( $CONFIG ) }; - -my %MATRIX_CONFIG = %{ $CONFIG{matrix} }; -# No harm in always applying this -$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE; - -# Track every Room object, so we can ->leave them all on shutdown -my %bot_matrix_rooms; - -my $bridgestate = {}; -my $roomid_by_callid = {}; - -my $bot_verto = Net::Async::WebSocket::Client->new( - on_frame => sub { - my ( $self, $frame ) = @_; - warn "[Verto] receiving $frame"; - on_verto_json($frame); - }, -); -$loop->add( $bot_verto ); - -my $sessid = lc new Data::UUID->create_str(); - -my $bot_matrix = Net::Async::Matrix->new( - %MATRIX_CONFIG, - on_log => sub { warn "log: @_\n" }, - on_invite => sub { - my ($matrix, $invite) = @_; - warn "[Matrix] invited to: " . $invite->{room_id} . " by " . $invite->{inviter} . "\n"; - - $matrix->join_room( $invite->{room_id} )->get; - }, - on_room_new => sub { - my ($matrix, $room) = @_; - - warn "[Matrix] have a room ID: " . $room->room_id . 
"\n"; - - $bot_matrix_rooms{$room->room_id} = $room; - - # log in to verto on behalf of this room - $bridgestate->{$room->room_id}->{sessid} = $sessid; - - $room->configure( - on_message => \&on_room_message, - ); - - my $f = send_verto_json_request("login", { - 'login' => $CONFIG{'verto-dialog-params'}{'login'}, - 'passwd' => $CONFIG{'verto-config'}{'passwd'}, - 'sessid' => $sessid, - }); - $matrix->adopt_future($f); - - # we deliberately don't paginate the room, as we only care about - # new calls - }, - on_unknown_event => \&on_unknown_event, - on_error => sub { - print STDERR "Matrix failure: @_\n"; - }, -); -$loop->add( $bot_matrix ); - -sub on_unknown_event -{ - my ($matrix, $event) = @_; - print Dumper($event); - - my $room_id = $event->{room_id}; - my %dp = %{$CONFIG{'verto-dialog-params'}}; - $dp{callID} = $bridgestate->{$room_id}->{callid}; - - if ($event->{type} eq 'm.call.invite') { - $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id}; - $bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str(); - $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp}; - $bridgestate->{$room_id}->{gathered_candidates} = 0; - $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id; - # no trickle ICE in verto apparently - } - elsif ($event->{type} eq 'm.call.candidates') { - # XXX: compare call IDs - if (!$bridgestate->{$room_id}->{gathered_candidates}) { - $bridgestate->{$room_id}->{gathered_candidates} = 1; - my $offer = $bridgestate->{$room_id}->{offer}; - my $candidate_block = { - audio => '', - video => '', - }; - foreach (@{$event->{content}->{candidates}}) { - if ($_->{sdpMid}) { - $candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n"; - } - else { - $candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n"; - $candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n"; - } - } - - # XXX: assumes audio comes first - #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/; - #$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/; - - $offer =~ s/(m=video)/$candidate_block->{audio}$1/; - $offer =~ s/(.$)/$1\n$candidate_block->{video}$1/; - - my $f = send_verto_json_request("verto.invite", { - "sdp" => $offer, - "dialogParams" => \%dp, - "sessid" => $bridgestate->{$room_id}->{sessid}, - }); - $matrix->adopt_future($f); - } - else { - # ignore them, as no trickle ICE, although we might as well - # batch them up - # foreach (@{$event->{content}->{candidates}}) { - # push @{$bridgestate->{$room_id}->{candidates}}, $_; - # } - } - } - elsif ($event->{type} eq 'm.call.hangup') { - if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) { - my $f = send_verto_json_request("verto.bye", { - "dialogParams" => \%dp, - "sessid" => $bridgestate->{$room_id}->{sessid}, - }); - $matrix->adopt_future($f); - } - else { - warn "Ignoring unrecognised callid: ".$event->{content}->{call_id}; - } - } - else { - warn "Unhandled event: $event->{type}"; - } -} - -sub on_room_message -{ - my ($room, $from, $content) = @_; - my $room_id = $room->room_id; - warn "[Matrix] in $room_id: $from: " . $content->{body} . 
"\n"; -} - -Future->needs_all( - $bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub { - $bot_matrix->start; - }), - - $bot_verto->connect( - %{ $CONFIG{"verto-bot"} }, - on_connect_error => sub { die "Cannot connect to verto - $_[-1]" }, - on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" }, - )->on_done( sub { - warn("[Verto] connected to websocket"); - }), -)->get; - -$loop->attach_signal( - PIPE => sub { warn "pipe\n" } -); -$loop->attach_signal( - INT => sub { $loop->stop }, -); -$loop->attach_signal( - TERM => sub { $loop->stop }, -); - -eval { - $loop->run; -} or my $e = $@; - -# When the bot gets shut down, have it leave the rooms so it's clear to observers -# that it is no longer running. -# if( $CONFIG{"leave-on-shutdown"} // 1 ) { -# print STDERR "Removing bot from Matrix rooms...\n"; -# Future->wait_all( map { $_->leave->else_done() } values %bot_matrix_rooms )->get; -# } -# else { -# print STDERR "Leaving bot users in Matrix rooms.\n"; -# } - -die $e if $e; - -exit 0; - -{ - my $json_id; - my $requests; - - sub send_verto_json_request - { - $json_id ||= 1; - - my ($method, $params) = @_; - my $json = { - jsonrpc => "2.0", - method => $method, - params => $params, - id => $json_id, - }; - my $text = JSON->new->encode( $json ); - warn "[Verto] sending $text"; - $bot_verto->send_frame ( $text ); - my $request = $loop->new_future; - $requests->{$json_id} = $request; - $json_id++; - return $request; - } - - sub send_verto_json_response - { - my ($result, $id) = @_; - my $json = { - jsonrpc => "2.0", - result => $result, - id => $id, - }; - my $text = JSON->new->encode( $json ); - warn "[Verto] sending $text"; - $bot_verto->send_frame ( $text ); - } - - sub on_verto_json - { - my $json = JSON->new->decode( $_[0] ); - if ($json->{method}) { - if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) || - $json->{method} eq 'verto.media') { - - my $room_id = $roomid_by_callid->{$json->{params}->{callID}}; - my $room = $bot_matrix_rooms{$room_id}; - - if ($json->{params}->{sdp}) { - # HACK HACK HACK HACK - $room->_do_POST_json( "/send/m.call.answer", { - call_id => $bridgestate->{$room_id}->{matrix_callid}, - version => 0, - answer => { - sdp => $json->{params}->{sdp}, - type => "answer", - }, - })->then( sub { - send_verto_json_response( { - method => $json->{method}, - }, $json->{id}); - })->get; - } - } - else { - warn ("[Verto] unhandled method: " . $json->{method}); - send_verto_json_response( { - method => $json->{method}, - }, $json->{id}); - } - } - elsif ($json->{result}) { - $requests->{$json->{id}}->done($json->{result}); - } - elsif ($json->{error}) { - $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error}); - } - } -} - diff --git a/contrib/vertobot/bridge.pl b/contrib/vertobot/bridge.pl deleted file mode 100755
index a551850f40..0000000000 --- a/contrib/vertobot/bridge.pl +++ /dev/null
@@ -1,493 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; -use 5.010; # // -use IO::Socket::SSL qw(SSL_VERIFY_NONE); -use IO::Async::Loop; -use Net::Async::WebSocket::Client; -use Net::Async::HTTP; -use Net::Async::HTTP::Server; -use JSON; -use YAML; -use Data::UUID; -use Getopt::Long; -use Data::Dumper; -use URI::Encode qw(uri_encode uri_decode); - -binmode STDOUT, ":encoding(UTF-8)"; -binmode STDERR, ":encoding(UTF-8)"; - -my $msisdn_to_matrix = { - '447417892400' => '@matthew:matrix.org', -}; - -my $matrix_to_msisdn = {}; -foreach (keys %$msisdn_to_matrix) { - $matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_; -} - - -my $loop = IO::Async::Loop->new; -# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See -# https://rt.cpan.org/Ticket/Display.html?id=93107 -# ref $loop eq "IO::Async::Loop::Poll" and -# warn "Using SSL with IO::Poll causes known memory-leaks!!\n"; - -GetOptions( - 'C|config=s' => \my $CONFIG, - 'eval-from=s' => \my $EVAL_FROM, -) or exit 1; - -if( defined $EVAL_FROM ) { - # An emergency 'eval() this file' hack - $SIG{HUP} = sub { - my $code = do { - open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return; - local $/; <$fh> - }; - - eval $code or warn "Cannot eval() - $@"; - }; -} - -defined $CONFIG or die "Must supply --config\n"; - -my %CONFIG = %{ YAML::LoadFile( $CONFIG ) }; - -my %MATRIX_CONFIG = %{ $CONFIG{matrix} }; -# No harm in always applying this -$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE; - -my $bridgestate = {}; -my $roomid_by_callid = {}; - -my $sessid = lc new Data::UUID->create_str(); -my $as_token = $CONFIG{"matrix-bot"}->{as_token}; -my $hs_domain = $CONFIG{"matrix-bot"}->{domain}; - -my $http = Net::Async::HTTP->new(); -$loop->add( $http ); - -sub create_virtual_user -{ - my ($localpart) = @_; - my ( $response ) = $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/register?". - "access_token=$as_token&user_id=$localpart" - ), - content_type => "application/json", - content => <<EOT -{ - "type": "m.login.application_service", - "user": "$localpart" -} -EOT - )->get; - warn $response->as_string if ($response->code != 200); -} - -my $http_server = Net::Async::HTTP::Server->new( - on_request => sub { - my $self = shift; - my ( $req ) = @_; - - my $response; - my $path = uri_decode($req->path); - warn("request: $path"); - if ($path =~ m#/users/\@(\+.*)#) { - # when queried about virtual users, auto-create them in the HS - my $localpart = $1; - create_virtual_user($localpart); - $response = HTTP::Response->new( 200 ); - $response->add_content('{}'); - $response->content_type( "application/json" ); - } - elsif ($path =~ m#/transactions/(.*)#) { - my $event = JSON->new->decode($req->body); - print Dumper($event); - - my $room_id = $event->{room_id}; - my %dp = %{$CONFIG{'verto-dialog-params'}}; - $dp{callID} = $bridgestate->{$room_id}->{callid}; - - if ($event->{type} eq 'm.room.membership') { - my $membership = $event->{content}->{membership}; - my $state_key = $event->{state_key}; - my $room_id = $event->{state_id}; - - if ($membership eq 'invite') { - # autojoin invites - my ( $response ) = $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/rooms/$room_id/join?". 
- "access_token=$as_token&user_id=$state_key" - ), - content_type => "application/json", - content => "{}", - )->get; - warn $response->as_string if ($response->code != 200); - } - } - elsif ($event->{type} eq 'm.call.invite') { - my $room_id = $event->{room_id}; - $bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id}; - $bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str(); - $bridgestate->{$room_id}->{sessid} = $sessid; - # $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp}; - my $offer = $event->{content}->{offer}->{sdp}; - # $bridgestate->{$room_id}->{gathered_candidates} = 0; - $roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id; - # no trickle ICE in verto apparently - - my $f = send_verto_json_request("verto.invite", { - "sdp" => $offer, - "dialogParams" => \%dp, - "sessid" => $bridgestate->{$room_id}->{sessid}, - }); - $self->adopt_future($f); - } - # elsif ($event->{type} eq 'm.call.candidates') { - # # XXX: this could fire for both matrix->verto and verto->matrix calls - # # and races as it collects candidates. much better to just turn off - # # candidate gathering in the webclient entirely for now - # - # my $room_id = $event->{room_id}; - # # XXX: compare call IDs - # if (!$bridgestate->{$room_id}->{gathered_candidates}) { - # $bridgestate->{$room_id}->{gathered_candidates} = 1; - # my $offer = $bridgestate->{$room_id}->{offer}; - # my $candidate_block = ""; - # foreach (@{$event->{content}->{candidates}}) { - # $candidate_block .= "a=" . $_->{candidate} . "\r\n"; - # } - # # XXX: collate using the right m= line - for now assume audio call - # $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/; - # - # my $f = send_verto_json_request("verto.invite", { - # "sdp" => $offer, - # "dialogParams" => \%dp, - # "sessid" => $bridgestate->{$room_id}->{sessid}, - # }); - # $self->adopt_future($f); - # } - # else { - # # ignore them, as no trickle ICE, although we might as well - # # batch them up - # # foreach (@{$event->{content}->{candidates}}) { - # # push @{$bridgestate->{$room_id}->{candidates}}, $_; - # # } - # } - # } - elsif ($event->{type} eq 'm.call.answer') { - # grab the answer and relay it to verto as a verto.answer - my $room_id = $event->{room_id}; - - my $answer = $event->{content}->{answer}->{sdp}; - my $f = send_verto_json_request("verto.answer", { - "sdp" => $answer, - "dialogParams" => \%dp, - "sessid" => $bridgestate->{$room_id}->{sessid}, - }); - $self->adopt_future($f); - } - elsif ($event->{type} eq 'm.call.hangup') { - my $room_id = $event->{room_id}; - if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) { - my $f = send_verto_json_request("verto.bye", { - "dialogParams" => \%dp, - "sessid" => $bridgestate->{$room_id}->{sessid}, - }); - $self->adopt_future($f); - } - else { - warn "Ignoring unrecognised callid: ".$event->{content}->{call_id}; - } - } - else { - warn "Unhandled event: $event->{type}"; - } - - $response = HTTP::Response->new( 200 ); - $response->add_content('{}'); - $response->content_type( "application/json" ); - } - else { - warn "Unhandled path: $path"; - $response = HTTP::Response->new( 404 ); - } - - $req->respond( $response ); - }, -); -$loop->add( $http_server ); - -$http_server->listen( - addr => { family => "inet", socktype => "stream", port => 8009 }, - on_listen_error => sub { die "Cannot listen - $_[-1]\n" }, -); - -my $bot_verto = Net::Async::WebSocket::Client->new( - on_frame => sub { - my ( $self, $frame ) = @_; - warn "[Verto] receiving 
$frame"; - on_verto_json($frame); - }, -); -$loop->add( $bot_verto ); - -my $verto_connecting = $loop->new_future; -$bot_verto->connect( - %{ $CONFIG{"verto-bot"} }, - on_connected => sub { - warn("[Verto] connected to websocket"); - if (not $verto_connecting->is_done) { - $verto_connecting->done($bot_verto); - - send_verto_json_request("login", { - 'login' => $CONFIG{'verto-dialog-params'}{'login'}, - 'passwd' => $CONFIG{'verto-config'}{'passwd'}, - 'sessid' => $sessid, - }); - } - }, - on_connect_error => sub { die "Cannot connect to verto - $_[-1]" }, - on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" }, -); - -# die Dumper($verto_connecting); - -my $as_url = $CONFIG{"matrix-bot"}->{as_url}; - -Future->needs_all( - $http->do_request( - method => "POST", - uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ), - content_type => "application/json", - content => <<EOT -{ - "as_token": "$as_token", - "url": "$as_url", - "namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] } -} -EOT - )->then( sub{ - my ($response) = (@_); - warn $response->as_string if ($response->code != 200); - return Future->done; - }), - $verto_connecting, -)->get; - -$loop->attach_signal( - PIPE => sub { warn "pipe\n" } -); -$loop->attach_signal( - INT => sub { $loop->stop }, -); -$loop->attach_signal( - TERM => sub { $loop->stop }, -); - -eval { - $loop->run; -} or my $e = $@; - -die $e if $e; - -exit 0; - -{ - my $json_id; - my $requests; - - sub send_verto_json_request - { - $json_id ||= 1; - - my ($method, $params) = @_; - my $json = { - jsonrpc => "2.0", - method => $method, - params => $params, - id => $json_id, - }; - my $text = JSON->new->encode( $json ); - warn "[Verto] sending $text"; - $bot_verto->send_frame ( $text ); - my $request = $loop->new_future; - $requests->{$json_id} = $request; - $json_id++; - return $request; - } - - sub send_verto_json_response - { - my ($result, $id) = @_; - my $json = { - jsonrpc => "2.0", - result => $result, - id => $id, - }; - my $text = JSON->new->encode( $json ); - warn "[Verto] sending $text"; - $bot_verto->send_frame ( $text ); - } - - sub on_verto_json - { - my $json = JSON->new->decode( $_[0] ); - if ($json->{method}) { - if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) || - $json->{method} eq 'verto.media') { - - my $caller = $json->{dialogParams}->{caller_id_number}; - my $callee = $json->{dialogParams}->{destination_number}; - my $caller_user = '@+' . $caller . ':' . $hs_domain; - my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee"; - my $room_id = $roomid_by_callid->{$json->{params}->{callID}}; - - if ($json->{params}->{sdp}) { - $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/send/m.call.answer?". - "access_token=$as_token&user_id=$caller_user" - ), - content_type => "application/json", - content => JSON->new->encode({ - call_id => $bridgestate->{$room_id}->{matrix_callid}, - version => 0, - answer => { - sdp => $json->{params}->{sdp}, - type => "answer", - }, - }), - )->then( sub { - send_verto_json_response( { - method => $json->{method}, - }, $json->{id}); - })->get; - } - } - elsif ($json->{method} eq 'verto.invite') { - my $caller = $json->{dialogParams}->{caller_id_number}; - my $callee = $json->{dialogParams}->{destination_number}; - my $caller_user = '@+' . $caller . ':' . 
$hs_domain; - my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee"; - - my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller); - my $room_id; - - # create a virtual user for the caller if needed. - create_virtual_user($caller); - - # create a room of form #peer-peer and invite the callee - $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/createRoom?". - "access_token=$as_token&user_id=$caller_user" - ), - content_type => "application/json", - content => JSON->new->encode({ - room_alias_name => $alias, - invite => [ $callee_user ], - }), - )->then( sub { - my ( $response ) = @_; - my $resp = JSON->new->decode($response->content); - $room_id = $resp->{room_id}; - $roomid_by_callid->{$json->{params}->{callID}} = $room_id; - })->get; - - # join it - my ($response) = $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/join/$room_id?". - "access_token=$as_token&user_id=$caller_user" - ), - content_type => "application/json", - content => '{}', - )->get; - - $bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str(); - $bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID}; - $bridgestate->{$room_id}->{sessid} = $sessid; - - # put the m.call.invite in there - $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/send/m.call.invite?". - "access_token=$as_token&user_id=$caller_user" - ), - content_type => "application/json", - content => JSON->new->encode({ - call_id => $bridgestate->{$room_id}->{matrix_callid}, - version => 0, - answer => { - sdp => $json->{params}->{sdp}, - type => "offer", - }, - }), - )->then( sub { - # acknowledge the verto - send_verto_json_response( { - method => $json->{method}, - }, $json->{id}); - })->get; - } - elsif ($json->{method} eq 'verto.bye') { - my $caller = $json->{dialogParams}->{caller_id_number}; - my $callee = $json->{dialogParams}->{destination_number}; - my $caller_user = '@+' . $caller . ':' . $hs_domain; - my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecogised callee: $callee"; - my $room_id = $roomid_by_callid->{$json->{params}->{callID}}; - - # put the m.call.hangup into the room - $http->do_request( - method => "POST", - uri => URI->new( - $CONFIG{"matrix"}->{server}. - "/_matrix/client/api/v1/send/m.call.hangup?". - "access_token=$as_token&user_id=$caller_user" - ), - content_type => "application/json", - content => JSON->new->encode({ - call_id => $bridgestate->{$room_id}->{matrix_callid}, - version => 0, - }), - )->then( sub { - # acknowledge the verto - send_verto_json_response( { - method => $json->{method}, - }, $json->{id}); - })->get; - } - else { - warn ("[Verto] unhandled method: " . $json->{method}); - send_verto_json_response( { - method => $json->{method}, - }, $json->{id}); - } - } - elsif ($json->{result}) { - $requests->{$json->{id}}->done($json->{result}); - } - elsif ($json->{error}) { - $requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error}); - } - } -} - diff --git a/contrib/vertobot/config.yaml b/contrib/vertobot/config.yaml deleted file mode 100644
index 555d9389d7..0000000000 --- a/contrib/vertobot/config.yaml +++ /dev/null
@@ -1,32 +0,0 @@ -# Generic Matrix connection params -matrix: - server: 'matrix.org' - SSL: 1 - -# Bot-user connection details -matrix-bot: - user_id: '@vertobot:matrix.org' - password: '' - domain: 'matrix.org" - as_url: 'http://localhost:8009' - as_token: 'vertobot123' - -verto-bot: - host: webrtc.freeswitch.org - service: 8081 - url: "ws://webrtc.freeswitch.org:8081/" - -verto-config: - passwd: 1234 - -verto-dialog-params: - useVideo: false - useStereo: false - tag: "webcam" - login: "1008@webrtc.freeswitch.org" - destination_number: "9664" - caller_id_name: "FreeSWITCH User" - caller_id_number: "1008" - callID: "" - remote_caller_id_name: "Outbound Call" - remote_caller_id_number: "9664" diff --git a/contrib/vertobot/cpanfile b/contrib/vertobot/cpanfile deleted file mode 100644
index 800dc288ed..0000000000 --- a/contrib/vertobot/cpanfile +++ /dev/null
@@ -1,14 +0,0 @@ -requires 'parent', 0; -requires 'Future', '>= 0.29'; -requires 'Net::Async::Matrix', '>= 0.11_002'; -requires 'Net::Async::Matrix::Utils'; -requires 'Net::Async::WebSocket::Protocol', 0; -requires 'Data::UUID', 0; -requires 'IO::Async', '>= 0.63'; -requires 'IO::Async::SSL', 0; -requires 'IO::Socket::SSL', 0; -requires 'YAML', 0; -requires 'JSON', 0; -requires 'Getopt::Long', 0; - - diff --git a/contrib/vertobot/verto-example.json b/contrib/vertobot/verto-example.json deleted file mode 100644
index e0230498a6..0000000000 --- a/contrib/vertobot/verto-example.json +++ /dev/null
@@ -1,207 +0,0 @@ -# JSON is shown in *reverse* chronological order. -# Send v. Receive is implicit. - -{ - "jsonrpc": "2.0", - "id": 7, - "result": { - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "message": "CALL ENDED", - "causeCode": 16, - "cause": "NORMAL_CLEARING", - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - } -} - -{ - "jsonrpc": "2.0", - "method": "verto.bye", - "params": { - "dialogParams": { - "useVideo": false, - "useStereo": true, - "tag": "webcam", - "login": "1008@webrtc.freeswitch.org", - "destination_number": "9664", - "caller_id_name": "FreeSWITCH User", - "caller_id_number": "1008", - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "remote_caller_id_name": "Outbound Call", - "remote_caller_id_number": "9664" - }, - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 7 -} - -{ - "jsonrpc": "2.0", - "id": 6, - "result": { - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "action": "toggleHold", - "holdState": "active", - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - } -} - -{ - "jsonrpc": "2.0", - "method": "verto.modify", - "params": { - "action": "toggleHold", - "dialogParams": { - "useVideo": false, - "useStereo": true, - "tag": "webcam", - "login": "1008@webrtc.freeswitch.org", - "destination_number": "9664", - "caller_id_name": "FreeSWITCH User", - "caller_id_number": "1008", - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "remote_caller_id_name": "Outbound Call", - "remote_caller_id_number": "9664" - }, - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 6 -} - -{ - "jsonrpc": "2.0", - "id": 5, - "result": { - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "action": "toggleHold", - "holdState": "held", - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - } -} - -{ - "jsonrpc": "2.0", - "method": "verto.modify", - "params": { - "action": "toggleHold", - "dialogParams": { - "useVideo": false, - "useStereo": true, - "tag": "webcam", - "login": "1008@webrtc.freeswitch.org", - "destination_number": "9664", - "caller_id_name": "FreeSWITCH User", - "caller_id_number": "1008", - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "remote_caller_id_name": "Outbound Call", - "remote_caller_id_number": "9664" - }, - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 5 -} - -{ - "jsonrpc": "2.0", - "id": 349819, - "result": { - "method": "verto.answer" - } -} - -{ - "jsonrpc": "2.0", - "id": 349819, - "method": "verto.answer", - "params": { - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "sdp": "v=0\no=FreeSWITCH 1417101432 1417101433 IN IP4 209.105.235.10\ns=FreeSWITCH\nc=IN IP4 209.105.235.10\nt=0 0\na=msid-semantic: WMS jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\nm=audio 30134 RTP/SAVPF 111 126\na=rtpmap:111 opus/48000/2\na=fmtp:111 minptime=10; stereo=1\na=rtpmap:126 telephone-event/8000\na=silenceSupp:off - - - -\na=ptime:20\na=sendrecv\na=fingerprint:sha-256 F8:72:18:E9:72:89:99:22:5B:F8:B6:C6:C6:0D:C5:9B:B2:FB:BC:CA:8D:AB:13:8A:66:E1:37:38:A0:16:AA:41\na=rtcp-mux\na=rtcp:30134 IN IP4 209.105.235.10\na=ssrc:210967934 cname:rOIEajpw4FocakWY\na=ssrc:210967934 msid:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq a0\na=ssrc:210967934 mslabel:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\na=ssrc:210967934 label:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vqa0\na=ice-ufrag:OKwTmGLapwmxn7OF\na=ice-pwd:MmaMwq8rVmtWxfLbQ7U2Ew3T\na=candidate:2372654928 1 udp 659136 209.105.235.10 30134 typ host generation 0\n" - } -} - -{ - "jsonrpc": "2.0", - "id": 4, - "result": { - "message": "CALL CREATED", - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "sessid": 
"03a11060-3e14-23b6-c620-51b892c52983" - } -} - -{ - "jsonrpc": "2.0", - "method": "verto.invite", - "params": { - "sdp": "v=0\r\no=- 1381685806032722557 2 IN IP4 127.0.0.1\r\ns=-\r\nt=0 0\r\na=group:BUNDLE audio\r\na=msid-semantic: WMS 6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\nm=audio 63088 RTP/SAVPF 111 103 104 0 8 106 105 13 126\r\nc=IN IP4 81.138.8.249\r\na=rtcp:63088 IN IP4 81.138.8.249\r\na=candidate:460398169 1 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:460398169 2 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:3460887983 1 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:3460887983 2 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:945327227 1 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:945327227 2 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:1441981097 1 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:1441981097 2 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:2160789855 1 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=candidate:2160789855 2 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=ice-ufrag:cP4qeRhn0LpcpA88\r\na=ice-pwd:fREmgSkXsDLGUUH1bwfrBQhW\r\na=ice-options:google-ice\r\na=fingerprint:sha-256 AF:35:64:1B:62:8A:EF:27:AE:2B:88:2E:FE:78:29:0B:08:DA:64:6C:DE:02:57:E3:EE:B1:D7:86:B8:36:8F:B0\r\na=setup:actpass\r\na=mid:audio\r\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\na=extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\na=sendrecv\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=fmtp:111 minptime=10; stereo=1\r\na=rtpmap:103 ISAC/16000\r\na=rtpmap:104 ISAC/32000\r\na=rtpmap:0 PCMU/8000\r\na=rtpmap:8 PCMA/8000\r\na=rtpmap:106 CN/32000\r\na=rtpmap:105 CN/16000\r\na=rtpmap:13 CN/8000\r\na=rtpmap:126 telephone-event/8000\r\na=maxptime:60\r\na=ssrc:558827154 cname:vdKHBNqa17t2gmE3\r\na=ssrc:558827154 msid:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\na=ssrc:558827154 mslabel:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\na=ssrc:558827154 label:bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\n", - "dialogParams": { - "useVideo": false, - "useStereo": true, - "tag": "webcam", - "login": "1008@webrtc.freeswitch.org", - "destination_number": "9664", - "caller_id_name": "FreeSWITCH User", - "caller_id_number": "1008", - "callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef", - "remote_caller_id_name": "Outbound Call", - "remote_caller_id_number": "9664" - }, - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 4 -} - -{ - "jsonrpc": "2.0", - "id": 3, - "result": { - "message": "logged in", - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - } -} - -{ - "jsonrpc": "2.0", - "id": 1, - "error": { - "code": -32000, - "message": "Authentication Required" - } -} - -{ - "jsonrpc": "2.0", - "method": "login", - "params": { - "login": "1008@webrtc.freeswitch.org", - "passwd": "1234", - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 3 -} - -{ - "jsonrpc": "2.0", - "id": 2, - "error": { - "code": -32000, - "message": "Authentication Required" - } -} - -{ - "jsonrpc": "2.0", - "method": "login", - "params": { - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 1 -} - -{ - "jsonrpc": "2.0", - "method": "login", - "params": { - "sessid": "03a11060-3e14-23b6-c620-51b892c52983" - }, - "id": 2 -} 
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index 5fc817b607..9e7fb95c8e 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv
@@ -35,7 +35,7 @@ TEMP_VENV="$(mktemp -d)" python3 -m venv "$TEMP_VENV" source "$TEMP_VENV/bin/activate" pip install -U pip -pip install poetry==1.3.2 +pip install poetry==2.1.1 poetry-plugin-export==1.9.0 poetry export \ --extras all \ --extras test \ diff --git a/debian/changelog b/debian/changelog
index e89fdb98dc..6afa33ad74 100644 --- a/debian/changelog +++ b/debian/changelog
@@ -1,3 +1,322 @@ +matrix-synapse-py3 (1.132.0) stable; urgency=medium + + * New Synapse release 1.132.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 17 Jun 2025 13:16:20 +0100 + +matrix-synapse-py3 (1.132.0~rc1) stable; urgency=medium + + * New Synapse release 1.132.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 10 Jun 2025 11:15:18 +0100 + +matrix-synapse-py3 (1.131.0) stable; urgency=medium + + * New Synapse release 1.131.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 03 Jun 2025 14:36:55 +0100 + +matrix-synapse-py3 (1.131.0~rc1) stable; urgency=medium + + * New synapse release 1.131.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 28 May 2025 10:25:44 +0000 + +matrix-synapse-py3 (1.130.0) stable; urgency=medium + + * New Synapse release 1.130.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 20 May 2025 08:34:13 -0600 + +matrix-synapse-py3 (1.130.0~rc1) stable; urgency=medium + + * New Synapse release 1.130.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 13 May 2025 10:44:04 +0100 + +matrix-synapse-py3 (1.129.0) stable; urgency=medium + + * New Synapse release 1.129.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 06 May 2025 12:22:11 +0100 + +matrix-synapse-py3 (1.129.0~rc2) stable; urgency=medium + + * New synapse release 1.129.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Wed, 30 Apr 2025 13:13:16 +0000 + +matrix-synapse-py3 (1.129.0~rc1) stable; urgency=medium + + * New Synapse release 1.129.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 15 Apr 2025 10:47:43 -0600 + +matrix-synapse-py3 (1.128.0) stable; urgency=medium + + * New Synapse release 1.128.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 08 Apr 2025 14:09:54 +0100 + +matrix-synapse-py3 (1.128.0~rc1) stable; urgency=medium + + * Update Poetry to 2.1.1. + * New synapse release 1.128.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 01 Apr 2025 14:35:33 +0000 + +matrix-synapse-py3 (1.127.1) stable; urgency=medium + + * New Synapse release 1.127.1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 26 Mar 2025 21:07:31 +0000 + +matrix-synapse-py3 (1.127.0) stable; urgency=medium + + * New Synapse release 1.127.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 25 Mar 2025 12:04:15 +0000 + +matrix-synapse-py3 (1.127.0~rc1) stable; urgency=medium + + * New Synapse release 1.127.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 18 Mar 2025 13:30:05 +0000 + +matrix-synapse-py3 (1.126.0) stable; urgency=medium + + * New Synapse release 1.126.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 11 Mar 2025 13:11:29 +0000 + +matrix-synapse-py3 (1.126.0~rc3) stable; urgency=medium + + * New Synapse release 1.126.0rc3. + + -- Synapse Packaging team <packages@matrix.org> Fri, 07 Mar 2025 15:45:05 +0000 + +matrix-synapse-py3 (1.126.0~rc2) stable; urgency=medium + + * New Synapse release 1.126.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Wed, 05 Mar 2025 14:29:12 +0000 + +matrix-synapse-py3 (1.126.0~rc1) stable; urgency=medium + + * New Synapse release 1.126.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 04 Mar 2025 13:11:51 +0000 + +matrix-synapse-py3 (1.125.0) stable; urgency=medium + + * New Synapse release 1.125.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 25 Feb 2025 08:10:07 -0700 + +matrix-synapse-py3 (1.125.0~rc1) stable; urgency=medium + + * New synapse release 1.125.0rc1. 
+ + -- Synapse Packaging team <packages@matrix.org> Tue, 18 Feb 2025 13:32:49 +0000 + +matrix-synapse-py3 (1.124.0) stable; urgency=medium + + * New Synapse release 1.124.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 11 Feb 2025 11:55:22 +0100 + +matrix-synapse-py3 (1.124.0~rc3) stable; urgency=medium + + * New Synapse release 1.124.0rc3. + + -- Synapse Packaging team <packages@matrix.org> Fri, 07 Feb 2025 13:42:55 +0000 + +matrix-synapse-py3 (1.124.0~rc2) stable; urgency=medium + + * New Synapse release 1.124.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Wed, 05 Feb 2025 16:35:53 +0000 + +matrix-synapse-py3 (1.124.0~rc1) stable; urgency=medium + + * New Synapse release 1.124.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 04 Feb 2025 11:53:05 +0000 + +matrix-synapse-py3 (1.123.0) stable; urgency=medium + + * New Synapse release 1.123.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 28 Jan 2025 08:37:34 -0700 + +matrix-synapse-py3 (1.123.0~rc1) stable; urgency=medium + + * New Synapse release 1.123.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 21 Jan 2025 14:39:57 +0100 + +matrix-synapse-py3 (1.122.0) stable; urgency=medium + + * New Synapse release 1.122.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 14 Jan 2025 14:14:14 +0000 + +matrix-synapse-py3 (1.122.0~rc1) stable; urgency=medium + + * New Synapse release 1.122.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 07 Jan 2025 14:06:19 +0000 + +matrix-synapse-py3 (1.121.1) stable; urgency=medium + + * New Synapse release 1.121.1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 11 Dec 2024 18:24:48 +0000 + +matrix-synapse-py3 (1.121.0) stable; urgency=medium + + * New Synapse release 1.121.0. + + -- Synapse Packaging team <packages@matrix.org> Wed, 11 Dec 2024 13:12:30 +0100 + +matrix-synapse-py3 (1.121.0~rc1) stable; urgency=medium + + * New Synapse release 1.121.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 04 Dec 2024 14:47:23 +0000 + +matrix-synapse-py3 (1.120.2) stable; urgency=medium + + * New synapse release 1.120.2. + + -- Synapse Packaging team <packages@matrix.org> Tue, 03 Dec 2024 15:43:37 +0000 + +matrix-synapse-py3 (1.120.1) stable; urgency=medium + + * New synapse release 1.120.1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 03 Dec 2024 09:07:57 +0000 + +matrix-synapse-py3 (1.120.0) stable; urgency=medium + + * New synapse release 1.120.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2024 13:10:23 +0000 + +matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium + + * New Synapse release 1.120.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 20 Nov 2024 15:02:21 +0000 + +matrix-synapse-py3 (1.119.0) stable; urgency=medium + + * New Synapse release 1.119.0. + + -- Synapse Packaging team <packages@matrix.org> Wed, 13 Nov 2024 13:57:51 +0000 + +matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium + + * New Synapse release 1.119.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000 + +matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium + + * New Synapse release 1.119.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700 + +matrix-synapse-py3 (1.118.0) stable; urgency=medium + + * New Synapse release 1.118.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 29 Oct 2024 15:29:53 +0100 + +matrix-synapse-py3 (1.118.0~rc1) stable; urgency=medium + + * New Synapse release 1.118.0rc1. 
+ + -- Synapse Packaging team <packages@matrix.org> Tue, 22 Oct 2024 11:48:14 +0100 + +matrix-synapse-py3 (1.117.0) stable; urgency=medium + + * New Synapse release 1.117.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 15 Oct 2024 10:46:30 +0100 + +matrix-synapse-py3 (1.117.0~rc1) stable; urgency=medium + + * New Synapse release 1.117.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 08 Oct 2024 14:37:11 +0100 + +matrix-synapse-py3 (1.116.0) stable; urgency=medium + + * New Synapse release 1.116.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 01 Oct 2024 11:14:07 +0100 + +matrix-synapse-py3 (1.116.0~rc2) stable; urgency=medium + + * New synapse release 1.116.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Thu, 26 Sep 2024 13:28:43 +0000 + +matrix-synapse-py3 (1.116.0~rc1) stable; urgency=medium + + * New synapse release 1.116.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Wed, 25 Sep 2024 09:34:07 +0000 + +matrix-synapse-py3 (1.115.0) stable; urgency=medium + + * New Synapse release 1.115.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 17 Sep 2024 14:32:10 +0100 + +matrix-synapse-py3 (1.115.0~rc2) stable; urgency=medium + + * New Synapse release 1.115.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Thu, 12 Sep 2024 11:10:15 +0100 + +matrix-synapse-py3 (1.115.0~rc1) stable; urgency=medium + + * New Synapse release 1.115.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 10 Sep 2024 08:39:09 -0600 + +matrix-synapse-py3 (1.114.0) stable; urgency=medium + + * New Synapse release 1.114.0. + + -- Synapse Packaging team <packages@matrix.org> Mon, 02 Sep 2024 15:14:53 +0100 + +matrix-synapse-py3 (1.114.0~rc3) stable; urgency=medium + + * New Synapse release 1.114.0rc3. + + -- Synapse Packaging team <packages@matrix.org> Fri, 30 Aug 2024 16:38:05 +0100 + +matrix-synapse-py3 (1.114.0~rc2) stable; urgency=medium + + * New Synapse release 1.114.0rc2. + + -- Synapse Packaging team <packages@matrix.org> Fri, 30 Aug 2024 15:35:13 +0100 + +matrix-synapse-py3 (1.114.0~rc1) stable; urgency=medium + + * New synapse release 1.114.0rc1. + + -- Synapse Packaging team <packages@matrix.org> Tue, 20 Aug 2024 12:55:28 +0000 + +matrix-synapse-py3 (1.113.0) stable; urgency=medium + + * New Synapse release 1.113.0. + + -- Synapse Packaging team <packages@matrix.org> Tue, 13 Aug 2024 14:36:56 +0100 + matrix-synapse-py3 (1.113.0~rc1) stable; urgency=medium * New Synapse release 1.113.0rc1. diff --git a/debian/hash_password.1 b/debian/hash_password.1
index 39fa3ffcbf..af55e09c45 100644 --- a/debian/hash_password.1 +++ b/debian/hash_password.1
@@ -1,10 +1,13 @@ -.\" generated with Ronn-NG/v0.8.0 -.\" http://github.com/apjanke/ronn-ng/tree/0.8.0 -.TH "HASH_PASSWORD" "1" "July 2021" "" "" +.\" generated with Ronn-NG/v0.10.1 +.\" http://github.com/apjanke/ronn-ng/tree/0.10.1 +.TH "HASH_PASSWORD" "1" "August 2024" "" .SH "NAME" \fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset .SH "SYNOPSIS" -\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR] +.TS +allbox; +\fBhash_password\fR [\fB\-p\fR \fB\-\-password\fR [password]] [\fB\-c\fR \fB\-\-config\fR \fIfile\fR] +.TE .SH "DESCRIPTION" \fBhash_password\fR calculates the hash of a supplied password using bcrypt\. .P @@ -20,7 +23,7 @@ bcrypt_rounds: 17 password_config: pepper: "random hashing pepper" .SH "OPTIONS" .TP \fB\-p\fR, \fB\-\-password\fR -Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\. +Read the password from the command line if [password] is supplied, or from \fBSTDIN\fR\. If not, prompt the user and read the password from the tty prompt\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\. .TP \fB\-c\fR, \fB\-\-config\fR Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\. @@ -33,7 +36,17 @@ $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe .fi .IP "" 0 .P -Hash from the STDIN: +Hash from the stdin: +.IP "" 4 +.nf +$ cat password_file | hash_password +Password: +Confirm password: +$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG +.fi +.IP "" 0 +.P +Hash from the prompt: .IP "" 4 .nf $ hash_password @@ -53,6 +66,6 @@ $2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O .fi .IP "" 0 .SH "COPYRIGHT" -This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\. +This man page was written by Rahul De «rahulde@swecha\.net» for Debian GNU/Linux distribution\. .SH "SEE ALSO" synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1) diff --git a/debian/hash_password.1.html b/debian/hash_password.1.html new file mode 100644
index 0000000000..7a62787780 --- /dev/null +++ b/debian/hash_password.1.html
@@ -0,0 +1,182 @@ +<!DOCTYPE html> +<html> +<head> + <meta http-equiv='content-type' content='text/html;charset=utf-8'> + <meta name='generator' content='Ronn-NG/v0.10.1 (http://github.com/apjanke/ronn-ng/tree/0.10.1)'> + <title>hash_password(1) - Calculate the hash of a new password, so that passwords can be reset</title> + <style type='text/css' media='all'> + /* style: man */ + body#manpage {margin:0} + .mp {max-width:100ex;padding:0 9ex 1ex 4ex} + .mp p,.mp pre,.mp ul,.mp ol,.mp dl {margin:0 0 20px 0} + .mp h2 {margin:10px 0 0 0} + .mp > p,.mp > pre,.mp > ul,.mp > ol,.mp > dl {margin-left:8ex} + .mp h3 {margin:0 0 0 4ex} + .mp dt {margin:0;clear:left} + .mp dt.flush {float:left;width:8ex} + .mp dd {margin:0 0 0 9ex} + .mp h1,.mp h2,.mp h3,.mp h4 {clear:left} + .mp pre {margin-bottom:20px} + .mp pre+h2,.mp pre+h3 {margin-top:22px} + .mp h2+pre,.mp h3+pre {margin-top:5px} + .mp img {display:block;margin:auto} + .mp h1.man-title {display:none} + .mp,.mp code,.mp pre,.mp tt,.mp kbd,.mp samp,.mp h3,.mp h4 {font-family:monospace;font-size:14px;line-height:1.42857142857143} + .mp h2 {font-size:16px;line-height:1.25} + .mp h1 {font-size:20px;line-height:2} + .mp {text-align:justify;background:#fff} + .mp,.mp code,.mp pre,.mp pre code,.mp tt,.mp kbd,.mp samp {color:#131211} + .mp h1,.mp h2,.mp h3,.mp h4 {color:#030201} + .mp u {text-decoration:underline} + .mp code,.mp strong,.mp b {font-weight:bold;color:#131211} + .mp em,.mp var {font-style:italic;color:#232221;text-decoration:none} + .mp a,.mp a:link,.mp a:hover,.mp a code,.mp a pre,.mp a tt,.mp a kbd,.mp a samp {color:#0000ff} + .mp b.man-ref {font-weight:normal;color:#434241} + .mp pre {padding:0 4ex} + .mp pre code {font-weight:normal;color:#434241} + .mp h2+pre,h3+pre {padding-left:0} + ol.man-decor,ol.man-decor li {margin:3px 0 10px 0;padding:0;float:left;width:33%;list-style-type:none;text-transform:uppercase;color:#999;letter-spacing:1px} + ol.man-decor {width:100%} + ol.man-decor li.tl {text-align:left} + ol.man-decor li.tc {text-align:center;letter-spacing:4px} + ol.man-decor li.tr {text-align:right;float:right} + </style> +</head> +<!-- + The following styles are deprecated and will be removed at some point: + div#man, div#man ol.man, div#man ol.head, div#man ol.man. + + The .man-page, .man-decor, .man-head, .man-foot, .man-title, and + .man-navigation should be used instead. 
+--> +<body id='manpage'> + <div class='mp' id='man'> + + <div class='man-navigation' style='display:none'> + <a href="#NAME">NAME</a> + <a href="#SYNOPSIS">SYNOPSIS</a> + <a href="#DESCRIPTION">DESCRIPTION</a> + <a href="#FILES">FILES</a> + <a href="#OPTIONS">OPTIONS</a> + <a href="#EXAMPLES">EXAMPLES</a> + <a href="#COPYRIGHT">COPYRIGHT</a> + <a href="#SEE-ALSO">SEE ALSO</a> + </div> + + <ol class='man-decor man-head man head'> + <li class='tl'>hash_password(1)</li> + <li class='tc'></li> + <li class='tr'>hash_password(1)</li> + </ol> + + + +<h2 id="NAME">NAME</h2> +<p class="man-name"> + <code>hash_password</code> - <span class="man-whatis">Calculate the hash of a new password, so that passwords can be reset</span> +</p> +<h2 id="SYNOPSIS">SYNOPSIS</h2> + +<table> + <tbody> + <tr> + <td> +<code>hash_password</code> [<code>-p</code> +</td> + <td> +<code>--password</code> [password]] [<code>-c</code> +</td> + <td> +<code>--config</code> <var>file</var>]</td> + </tr> + </tbody> +</table> + +<h2 id="DESCRIPTION">DESCRIPTION</h2> + +<p><strong>hash_password</strong> calculates the hash of a supplied password using bcrypt.</p> + +<p><code>hash_password</code> takes a password as a parameter either on the command line +or the <code>STDIN</code> if not supplied.</p> + +<p>It accepts a YAML file which can be used to specify parameters like the +number of rounds for bcrypt and password_config section having the pepper +value used for the hashing. By default <code>bcrypt_rounds</code> is set to <strong>12</strong>.</p> + +<p>The hashed password is written to <code>STDOUT</code>.</p> + +<h2 id="FILES">FILES</h2> + +<p>A sample YAML file accepted by <code>hash_password</code> is described below:</p> + +<p>bcrypt_rounds: 17 + password_config: + pepper: "random hashing pepper"</p> + +<h2 id="OPTIONS">OPTIONS</h2> + +<dl> +<dt> +<code>-p</code>, <code>--password</code> +</dt> +<dd>Read the password from the command line if [password] is supplied, or from <code>STDIN</code>. +If not, prompt the user and read the password from the tty prompt. +It is not recommended to type the password on the command line +directly. 
Use the STDIN instead.</dd> +<dt> +<code>-c</code>, <code>--config</code> +</dt> +<dd>Read the supplied YAML <var>file</var> containing the options <code>bcrypt_rounds</code> +and the <code>password_config</code> section containing the <code>pepper</code> value.</dd> +</dl> + +<h2 id="EXAMPLES">EXAMPLES</h2> + +<p>Hash from the command line:</p> + +<pre><code>$ hash_password -p "p@ssw0rd" +$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe +</code></pre> + +<p>Hash from stdin:</p> + +<pre><code>$ cat password_file | hash_password +Password: +Confirm password: +$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG +</code></pre> + +<p>Hash from the prompt:</p> + +<pre><code>$ hash_password +Password: +Confirm password: +$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG +</code></pre> + +<p>Using a config file:</p> + +<pre><code>$ hash_password -c config.yml +Password: +Confirm password: +$2b$12$CwI.wBNr.w3kmiUlV3T5s.GT2wH7uebDCovDrCOh18dFedlANK99O +</code></pre> + +<h2 id="COPYRIGHT">COPYRIGHT</h2> + +<p>This man page was written by Rahul De «rahulde@swecha.net» +for the Debian GNU/Linux distribution.</p> + +<h2 id="SEE-ALSO">SEE ALSO</h2> + +<p><span class="man-ref">synctl<span class="s">(1)</span></span>, <span class="man-ref">synapse_port_db<span class="s">(1)</span></span>, <span class="man-ref">register_new_matrix_user<span class="s">(1)</span></span>, <span class="man-ref">synapse_review_recent_signups<span class="s">(1)</span></span></p> + + <ol class='man-decor man-foot man foot'> + <li class='tl'></li> + <li class='tc'>August 2024</li> + <li class='tr'>hash_password(1)</li> + </ol> + + </div> +</body> +</html> diff --git a/debian/hash_password.ronn b/debian/hash_password.ronn
index 5d0df53802..b68d4a210e 100644 --- a/debian/hash_password.ronn +++ b/debian/hash_password.ronn
@@ -29,8 +29,8 @@ A sample YAML file accepted by `hash_password` is described below: ## OPTIONS * `-p`, `--password`: - Read the password form the command line if [password] is supplied. - If not, prompt the user and read the password form the `STDIN`. + Read the password from the command line if [password] is supplied, or from `STDIN`. + If not, prompt the user and read the password from the tty prompt. It is not recommended to type the password on the command line directly. Use the STDIN instead. @@ -45,7 +45,14 @@ Hash from the command line: $ hash_password -p "p@ssw0rd" $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe -Hash from the STDIN: +Hash from stdin: + + $ cat password_file | hash_password + Password: + Confirm password: + $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG + +Hash from the prompt: $ hash_password Password: diff --git a/demo/start.sh b/demo/start.sh
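To make the DESCRIPTION above concrete, here is a minimal Python sketch of the hashing scheme `hash_password` describes: bcrypt with a configurable round count, and an optional pepper appended to the password before hashing. This is an illustration under those assumptions, not the canonical implementation; the sample values mirror the YAML shown in FILES.

```python
# Minimal sketch of the scheme described above: bcrypt with a configurable
# round count and an optional pepper appended to the password before hashing.
# Requires the third-party `bcrypt` package; values mirror the sample YAML.
import bcrypt

bcrypt_rounds = 17
pepper = "random hashing pepper"  # password_config.pepper

password = "p@ssw0rd"
hashed = bcrypt.hashpw(
    (password + pepper).encode("utf-8"),
    bcrypt.gensalt(rounds=bcrypt_rounds),
)
print(hashed.decode("ascii"))  # e.g. $2b$17$...
```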
index 06ec6f985f..e010302bf4 100755 --- a/demo/start.sh +++ b/demo/start.sh
@@ -138,6 +138,13 @@ for port in 8080 8081 8082; do per_user: per_second: 1000 burst_count: 1000 + rc_presence: + per_user: + per_second: 1000 + burst_count: 1000 + rc_delayed_event_mgmt: + per_second: 1000 + burst_count: 1000 RC ) echo "${ratelimiting}" >> "$port.config" diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1da196b12e..15c458fa28 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile
@@ -20,45 +20,16 @@ # `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in # in `poetry export` in the past. -ARG PYTHON_VERSION=3.11 +ARG DEBIAN_VERSION=bookworm +ARG PYTHON_VERSION=3.12 +ARG POETRY_VERSION=2.1.1 ### ### Stage 0: generate requirements.txt ### -# We hardcode the use of Debian bookworm here because this could change upstream -# and other Dockerfiles used for testing are expecting bookworm. -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS requirements - -# RUN --mount is specific to buildkit and is documented at -# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. -# Here we use it to set up a cache for apt (and below for pip), to improve -# rebuild speeds on slow connections. -RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && apt-get install -yqq \ - build-essential curl git libffi-dev libssl-dev pkg-config \ - && rm -rf /var/lib/apt/lists/* - -# Install rust and ensure its in the PATH. -# (Rust may be needed to compile `cryptography`---which is one of poetry's -# dependencies---on platforms that don't have a `cryptography` wheel. -ENV RUSTUP_HOME=/rust -ENV CARGO_HOME=/cargo -ENV PATH=/cargo/bin:/rust/bin:$PATH -RUN mkdir /rust /cargo - -RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal - -# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not -# set to true, so we expose it as a build-arg. -ARG CARGO_NET_GIT_FETCH_WITH_CLI=false -ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI - -# We install poetry in its own build stage to avoid its dependencies conflicting with -# synapse's dependencies. -RUN --mount=type=cache,target=/root/.cache/pip \ - pip install --user "poetry==1.3.2" +### This stage is platform-agnostic, so we can use the build platform in case of cross-compilation. +### +FROM --platform=$BUILDPLATFORM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS requirements WORKDIR /synapse @@ -75,41 +46,30 @@ ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION # Instead, we'll just install what a regular `pip install` would from PyPI. ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE +# This silences a warning as uv isn't able to do hardlinks between its cache +# (mounted as --mount=type=cache) and the target directory. +ENV UV_LINK_MODE=copy + # Export the dependencies, but only if we're actually going to use the Poetry lockfile. # Otherwise, just create an empty requirements file so that the Dockerfile can # proceed. 
-RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ - /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \ +ARG POETRY_VERSION +RUN --mount=type=cache,target=/root/.cache/uv \ + if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ + uvx --with poetry-plugin-export==1.9.0 \ + poetry@${POETRY_VERSION} export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \ else \ - touch /synapse/requirements.txt; \ + touch /synapse/requirements.txt; \ fi ### ### Stage 1: builder ### -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm AS builder - -# install the OS build deps -RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && apt-get install -yqq \ - build-essential \ - libffi-dev \ - libjpeg-dev \ - libpq-dev \ - libssl-dev \ - libwebp-dev \ - libxml++2.6-dev \ - libxslt1-dev \ - openssl \ - zlib1g-dev \ - git \ - curl \ - libicu-dev \ - pkg-config \ - && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS builder +# This silences a warning as uv isn't able to do hardlinks between its cache +# (mounted as --mount=type=cache) and the target directory. +ENV UV_LINK_MODE=copy # Install rust and ensure its in the PATH ENV RUSTUP_HOME=/rust @@ -119,7 +79,6 @@ RUN mkdir /rust /cargo RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal - # arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not # set to true, so we expose it as a build-arg. ARG CARGO_NET_GIT_FETCH_WITH_CLI=false @@ -131,8 +90,8 @@ ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI # # This is aiming at installing the `[tool.poetry.depdendencies]` from pyproject.toml. COPY --from=requirements /synapse/requirements.txt /synapse/ -RUN --mount=type=cache,target=/root/.cache/pip \ - pip install --prefix="/install" --no-deps --no-warn-script-location -r /synapse/requirements.txt +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --prefix="/install" --no-deps -r /synapse/requirements.txt # Copy over the rest of the synapse source code. COPY synapse /synapse/synapse/ @@ -146,41 +105,85 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE # Install the synapse package itself. # If we have populated requirements.txt, we don't install any dependencies # as we should already have those from the previous `pip install` step. -RUN --mount=type=cache,target=/synapse/target,sharing=locked \ +RUN \ + --mount=type=cache,target=/root/.cache/uv \ + --mount=type=cache,target=/synapse/target,sharing=locked \ --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \ if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ - pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \ + uv pip install --prefix="/install" --no-deps /synapse[all]; \ else \ - pip install --prefix="/install" --no-warn-script-location /synapse[all]; \ + uv pip install --prefix="/install" /synapse[all]; \ fi ### -### Stage 2: runtime +### Stage 2: runtime dependencies download for ARM64 and AMD64 ### +FROM --platform=$BUILDPLATFORM docker.io/library/debian:${DEBIAN_VERSION} AS runtime-deps -FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm +# Tell apt to keep downloaded package files, as we're using cache mounts. 
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache -LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' -LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md' -LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git' -LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later' +# Add both target architectures +RUN dpkg --add-architecture arm64 +RUN dpkg --add-architecture amd64 +# Fetch the runtime dependencies debs for both architectures +# We do that by building a recursive list of packages we need to download with `apt-cache depends` +# and then downloading them with `apt-get download`. RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && apt-get install -yqq \ - curl \ - gosu \ - libjpeg62-turbo \ - libpq5 \ - libwebp7 \ - xmlsec1 \ - libjemalloc2 \ - libicu72 \ - libssl-dev \ - openssl \ - && rm -rf /var/lib/apt/lists/* + apt-get update -qq && \ + apt-cache depends --recurse --no-recommends --no-suggests --no-conflicts --no-breaks --no-replaces --no-enhances --no-pre-depends \ + curl \ + gosu \ + libjpeg62-turbo \ + libpq5 \ + libwebp7 \ + xmlsec1 \ + libjemalloc2 \ + libicu \ + | grep '^\w' > /tmp/pkg-list && \ + for arch in arm64 amd64; do \ + mkdir -p /tmp/debs-${arch} && \ + cd /tmp/debs-${arch} && \ + apt-get -o APT::Architecture="${arch}" download $(cat /tmp/pkg-list); \ + done + +# Extract the debs for each architecture +RUN \ + for arch in arm64 amd64; do \ + mkdir -p /install-${arch}/var/lib/dpkg/status.d/ && \ + for deb in /tmp/debs-${arch}/*.deb; do \ + package_name=$(dpkg-deb -I ${deb} | awk '/^ Package: .*$/ {print $2}'); \ + echo "Extracting: ${package_name}"; \ + dpkg --ctrl-tarfile $deb | tar -Ox ./control > /install-${arch}/var/lib/dpkg/status.d/${package_name}; \ + dpkg --extract $deb /install-${arch}; \ + done; \ + done + + +### +### Stage 3: runtime +### + +FROM docker.io/library/python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION} + +ARG TARGETARCH + +LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' +LABEL org.opencontainers.image.documentation='https://github.com/element-hq/synapse/blob/master/docker/README.md' +LABEL org.opencontainers.image.source='https://github.com/element-hq/synapse.git' +LABEL org.opencontainers.image.licenses='AGPL-3.0-or-later' +# On the runtime image, /lib is a symlink to /usr/lib, so we need to copy the +# libraries to the right place, else the `COPY` won't work. +# On amd64, we'll also have a /lib64 folder with ld-linux-x86-64.so.2, which is +# already present in the runtime image. +COPY --from=runtime-deps /install-${TARGETARCH}/lib /usr/lib +COPY --from=runtime-deps /install-${TARGETARCH}/etc /etc +COPY --from=runtime-deps /install-${TARGETARCH}/usr /usr +COPY --from=runtime-deps /install-${TARGETARCH}/var /var COPY --from=builder /install /usr/local COPY ./docker/start.py /start.py COPY ./docker/conf /conf diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 2ceb6ab67c..6d0fc1440b 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers
@@ -2,18 +2,38 @@ ARG SYNAPSE_VERSION=latest ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION +ARG DEBIAN_VERSION=bookworm +ARG PYTHON_VERSION=3.12 -# first of all, we create a base image with an nginx which we can copy into the +# first of all, we create a base image with dependencies which we can copy into the # target image. For repeated rebuilds, this is much faster than apt installing # each time. -FROM docker.io/library/debian:bookworm-slim AS deps_base +FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-${DEBIAN_VERSION} AS deps_base + + # Tell apt to keep downloaded package files, as we're using cache mounts. + RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache + RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && \ DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \ - redis-server nginx-light + nginx-light + + RUN \ + # remove default page + rm /etc/nginx/sites-enabled/default && \ + # have nginx log to stderr/out + ln -sf /dev/stdout /var/log/nginx/access.log && \ + ln -sf /dev/stderr /var/log/nginx/error.log + + # --link-mode=copy silences a warning as uv isn't able to do hardlinks between its cache + # (mounted as --mount=type=cache) and the target directory. + RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install --link-mode=copy --prefix="/uv/usr/local" supervisor~=4.2 + + RUN mkdir -p /uv/etc/supervisor/conf.d # Similarly, a base to copy the redis server from. # @@ -21,31 +41,21 @@ FROM docker.io/library/debian:bookworm-slim AS deps_base # which makes it much easier to copy (but we need to make sure we use an image # based on the same debian version as the synapse image, to make sure we get # the expected version of libc. -FROM docker.io/library/redis:7-bookworm AS redis_base +FROM docker.io/library/redis:7-${DEBIAN_VERSION} AS redis_base # now build the final image, based on the the regular Synapse docker image FROM $FROM - # Install supervisord with pip instead of apt, to avoid installing a second - # copy of python. - RUN --mount=type=cache,target=/root/.cache/pip \ - pip install supervisor~=4.2 - RUN mkdir -p /etc/supervisor/conf.d - - # Copy over redis and nginx + # Copy over dependencies COPY --from=redis_base /usr/local/bin/redis-server /usr/local/bin - + COPY --from=deps_base /uv / COPY --from=deps_base /usr/sbin/nginx /usr/sbin COPY --from=deps_base /usr/share/nginx /usr/share/nginx COPY --from=deps_base /usr/lib/nginx /usr/lib/nginx COPY --from=deps_base /etc/nginx /etc/nginx - RUN rm /etc/nginx/sites-enabled/default - RUN mkdir /var/log/nginx /var/lib/nginx - RUN chown www-data /var/lib/nginx - - # have nginx log to stderr/out - RUN ln -sf /dev/stdout /var/log/nginx/access.log - RUN ln -sf /dev/stderr /var/log/nginx/error.log + COPY --from=deps_base /var/log/nginx /var/log/nginx + # chown to allow non-root user to write to http-*-temp-path dirs + COPY --from=deps_base --chown=www-data:root /var/lib/nginx /var/lib/nginx # Copy Synapse worker, nginx and supervisord configuration template files COPY ./docker/conf-workers/* /conf/ @@ -64,4 +74,4 @@ FROM $FROM # Replace the healthcheck with one which checks *all* the workers. The script # is generated by configure_workers_and_start.py. HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ - CMD /bin/sh /healthcheck.sh + CMD ["/healthcheck.sh"] diff --git a/docker/README.md b/docker/README.md
index 8dba6fdb05..3438e9c441 100644 --- a/docker/README.md +++ b/docker/README.md
@@ -114,6 +114,9 @@ The following environment variables are supported in `run` mode: is set via `docker run --user`, defaults to `991`, `991`. Note that this user must have permission to read the config files, and write to the data directories. * `TZ`: the [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) the container will run with. Defaults to `UTC`. +* `SYNAPSE_HTTP_PROXY`: Passed through to the Synapse process as the `http_proxy` environment variable. +* `SYNAPSE_HTTPS_PROXY`: Passed through to the Synapse process as the `https_proxy` environment variable. +* `SYNAPSE_NO_PROXY`: Passed through to the Synapse process as the `no_proxy` environment variable. For more complex setups (e.g. for workers) you can also pass your args directly to synapse using `run` mode. For example like this: diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index ce82c400eb..6ed084fe5d 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile
@@ -9,6 +9,9 @@ ARG SYNAPSE_VERSION=latest # This is an intermediate image, to be built locally (not pulled from a registry). ARG FROM=matrixdotorg/synapse-workers:$SYNAPSE_VERSION +ARG DEBIAN_VERSION=bookworm + +FROM docker.io/library/postgres:13-${DEBIAN_VERSION} AS postgres_base FROM $FROM # First of all, we copy postgres server from the official postgres image, @@ -20,9 +23,9 @@ FROM $FROM # the same debian version as Synapse's docker image (so the versions of the # shared libraries match). RUN adduser --system --uid 999 postgres --home /var/lib/postgresql -COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql -COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql -RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql +COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql +COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql +COPY --from=postgres_base --chown=postgres /var/run/postgresql /var/run/postgresql ENV PATH="${PATH}:/usr/lib/postgresql/13/bin" ENV PGDATA=/var/lib/postgresql/data @@ -55,4 +58,4 @@ ENTRYPOINT ["/start_for_complement.sh"] # Update the healthcheck to have a shorter check interval HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD /bin/sh /healthcheck.sh + CMD ["/healthcheck.sh"] diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh
index cc798a3210..a5e06396e2 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh
@@ -5,12 +5,12 @@ set -e echo "Complement Synapse launcher" -echo " Args: $@" +echo " Args: $*" echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" function log { - d=$(date +"%Y-%m-%d %H:%M:%S,%3N") - echo "$d $@" + d=$(printf '%(%Y-%m-%d %H:%M:%S)T,%.3s\n' ${EPOCHREALTIME/./ }) + echo "$d $*" } # Set the server name of the homeserver @@ -103,12 +103,11 @@ fi # Note that both the key and certificate are in PEM format (not DER). # First generate a configuration file to set up a Subject Alternative Name. -cat > /conf/server.tls.conf <<EOF +echo "\ .include /etc/ssl/openssl.cnf [SAN] -subjectAltName=DNS:${SERVER_NAME} -EOF +subjectAltName=DNS:${SERVER_NAME}" > /conf/server.tls.conf # Generate an RSA key openssl genrsa -out /conf/server.tls.key 2048 @@ -123,12 +122,12 @@ openssl x509 -req -in /conf/server.tls.csr \ -out /conf/server.tls.crt -extfile /conf/server.tls.conf -extensions SAN # Assert that we have a Subject Alternative Name in the certificate. -# (grep will exit with 1 here if there isn't a SAN in the certificate.) -openssl x509 -in /conf/server.tls.crt -noout -text | grep DNS: +# (the test will exit with 1 here if there isn't a SAN in the certificate.) +[[ $(openssl x509 -in /conf/server.tls.crt -noout -text) == *DNS:* ]] export SYNAPSE_TLS_CERT=/conf/server.tls.crt export SYNAPSE_TLS_KEY=/conf/server.tls.key # Run the script that writes the necessary config files and starts supervisord, which in turn # starts everything else -exec /configure_workers_and_start.py +exec /configure_workers_and_start.py "$@" diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2
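The reworked `log` helper above avoids forking a `date` process for every log line: bash splits `$EPOCHREALTIME` (e.g. `1724140800.123456`) into whole seconds for the `%(...)T` format and keeps the first three fractional digits as milliseconds. For readability, a rough Python rendering of the same timestamp (illustrative only, not what the script runs):

```python
# Rough Python rendering of the timestamp the bash log() helper produces:
# "YYYY-MM-DD HH:MM:SS,mmm" from whole seconds plus the first three
# fractional digits of the current time.
import time

now = time.time()
stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
print(f"{stamp},{int(now % 1 * 1000):03d} Complement Synapse launcher")
```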
index 6588b3ce14..48b44ddf90 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -7,6 +7,7 @@ #} ## Server ## +public_baseurl: http://127.0.0.1:8008/ report_stats: False trusted_key_servers: [] enable_registration: true @@ -84,6 +85,18 @@ rc_invites: per_user: per_second: 1000 burst_count: 1000 + per_issuer: + per_second: 1000 + burst_count: 1000 + +rc_presence: + per_user: + per_second: 9999 + burst_count: 9999 + +rc_delayed_event_mgmt: + per_second: 9999 + burst_count: 9999 federation_rr_transactions_per_room_per_second: 9999 @@ -104,6 +117,18 @@ experimental_features: msc3967_enabled: true # Expose a room summary for public rooms msc3266_enabled: true + # Send to-device messages to application services + msc2409_to_device_messages_enabled: true + # Allow application services to masquerade devices + msc3202_device_masquerading: true + # Sending device list changes, one-time key counts and fallback key usage to application services + msc3202_transaction_extensions: true + # Proxy OTK claim requests to exclusive ASes + msc3983_appservice_otk_claims: true + # Proxy key queries to exclusive ASes + msc3984_appservice_key_query: true + # Invite filtering + msc4155_enabled: true server_notices: system_mxid_localpart: _server @@ -111,10 +136,18 @@ server_notices: system_mxid_avatar_url: "" room_name: "Server Alert" +# Enable delayed events (msc4140) +max_event_delay_duration: 24h + # Disable sync cache so that initial `/sync` requests are up-to-date. caches: sync_response_cache_duration: 0 +# Complement assumes that it can publish to the room list by default. +room_list_publication_rules: + - action: allow + + {% include "shared-orig.yaml.j2" %} diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2
index d1e02af723..95d2f760d2 100644 --- a/docker/conf-workers/nginx.conf.j2 +++ b/docker/conf-workers/nginx.conf.j2
@@ -38,10 +38,13 @@ server { {% if using_unix_sockets %} proxy_pass http://unix:/run/main_public.sock; {% else %} + # note: do not add a path (even a single /) after the port in `proxy_pass`, + # otherwise nginx will canonicalise the URI and cause signature verification + # errors. proxy_pass http://localhost:8080; {% endif %} proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Host $host; + proxy_set_header Host $host:$server_port; } } diff --git a/docker/conf-workers/synapse.supervisord.conf.j2 b/docker/conf-workers/synapse.supervisord.conf.j2
index 481eb4fc92..4fb11b259e 100644 --- a/docker/conf-workers/synapse.supervisord.conf.j2 +++ b/docker/conf-workers/synapse.supervisord.conf.j2
@@ -1,5 +1,6 @@ {% if use_forking_launcher %} [program:synapse_fork] +environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s" command=/usr/local/bin/python -m synapse.app.complement_fork_starter {{ main_config_path }} synapse.app.homeserver @@ -20,6 +21,7 @@ exitcodes=0 {% else %} [program:synapse_main] +environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s" command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml @@ -36,6 +38,7 @@ exitcodes=0 {% for worker in workers %} [program:synapse_{{ worker.name }}] +environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s",https_proxy="%(ENV_SYNAPSE_HTTPS_PROXY)s",no_proxy="%(ENV_SYNAPSE_NO_PROXY)s" command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }} --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
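The `environment=` lines above rely on supervisord expanding `%(ENV_X)s` placeholders from its environment, which is why the entrypoint must define the `SYNAPSE_*_PROXY` variables even when empty (see `configure_workers_and_start.py` below). The expansion is ordinary Python %-interpolation; a small illustration with a made-up proxy value:

```python
# Illustration of how supervisord expands %(ENV_*)s in the template above:
# environment variables are exposed with an ENV_ prefix and substituted via
# ordinary Python %-interpolation. The proxy value here is made up.
template = 'environment=http_proxy="%(ENV_SYNAPSE_HTTP_PROXY)s"'
env = {"ENV_SYNAPSE_HTTP_PROXY": "http://proxy.local:3128"}
print(template % env)
# environment=http_proxy="http://proxy.local:3128"
```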
index 15d8d7b558..102a88fad1 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py
@@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/local/bin/python # # This file is licensed under the Affero General Public License (AGPL) version 3. # @@ -202,6 +202,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "app": "synapse.app.generic_worker", "listener_resources": ["federation"], "endpoint_patterns": [ + "^/_matrix/federation/v1/version$", "^/_matrix/federation/(v1|v2)/event/", "^/_matrix/federation/(v1|v2)/state/", "^/_matrix/federation/(v1|v2)/state_ids/", @@ -351,6 +352,11 @@ def error(txt: str) -> NoReturn: def flush_buffers() -> None: + """ + Python's `print()` buffers output by default, typically waiting until ~8KB + accumulates. This method can be used to flush the buffers so we can see the output + of any print statements so far. + """ sys.stdout.flush() sys.stderr.flush() @@ -376,9 +382,11 @@ def convert(src: str, dst: str, **template_vars: object) -> None: # # We use append mode in case the files have already been written to by something else # (for instance, as part of the instructions in a dockerfile). + exists = os.path.isfile(dst) with open(dst, "a") as outfile: # In case the existing file doesn't end with a newline - outfile.write("\n") + if exists: + outfile.write("\n") outfile.write(rendered) @@ -604,7 +612,7 @@ def generate_base_homeserver_config() -> None: # start.py already does this for us, so just call that. # note that this script is copied in in the official, monolith dockerfile os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT) - subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True) + subprocess.run([sys.executable, "/start.py", "migrate_config"], check=True) def parse_worker_types( @@ -998,6 +1006,7 @@ def generate_worker_files( "/healthcheck.sh", healthcheck_urls=healthcheck_urls, ) + os.chmod("/healthcheck.sh", 0o755) # Ensure the logging directory exists log_dir = data_dir + "/logs" @@ -1099,6 +1108,13 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: else: log("Could not find %s, will not use" % (jemallocpath,)) + # Empty strings are falsy in Python so this default is fine. We just can't have these + # be undefined because supervisord will complain about our + # `%(ENV_SYNAPSE_HTTP_PROXY)s` usage. + environ.setdefault("SYNAPSE_HTTP_PROXY", "") + environ.setdefault("SYNAPSE_HTTPS_PROXY", "") + environ.setdefault("SYNAPSE_NO_PROXY", "") + # Start supervisord, which will start Synapse, all of the configured worker # processes, redis, nginx etc. according to the config we created above. log("Starting supervisord") diff --git a/docker/prefix-log b/docker/prefix-log
index 32dddbbfd4..2a38de5686 100755 --- a/docker/prefix-log +++ b/docker/prefix-log
@@ -10,6 +10,9 @@ # '-W interactive' is a `mawk` extension which disables buffering on stdout and sets line-buffered reads on # stdin. The effect is that the output is flushed after each line, rather than being batched, which helps reduce # confusion due to to interleaving of the different processes. -exec 1> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&1) -exec 2> >(awk -W interactive '{print "'"${SUPERVISOR_PROCESS_NAME}"' | "$0 }' >&2) +prefixer() { + mawk -W interactive '{printf("%s | %s\n", ENVIRON["SUPERVISOR_PROCESS_NAME"], $0); fflush() }' +} +exec 1> >(prefixer) +exec 2> >(prefixer >&2) exec "$@" diff --git a/docker/start.py b/docker/start.py
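For readers less fluent in awk: the `mawk` one-liner above prepends the supervisor process name to every line and flushes after each one, so output from different workers is not batched together. A rough Python equivalent (illustrative only, not what the image runs):

```python
# Rough Python equivalent of the mawk prefixer above: prepend the supervisor
# process name to each line and flush immediately so lines from different
# processes interleave cleanly instead of being batched.
import os
import sys

prefix = os.environ.get("SUPERVISOR_PROCESS_NAME", "unknown")
for line in sys.stdin:
    sys.stdout.write(f"{prefix} | {line}")
    sys.stdout.flush()
```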
index 818a5355ca..0be9976a0c 100755 --- a/docker/start.py +++ b/docker/start.py
@@ -22,6 +22,11 @@ def error(txt: str) -> NoReturn: def flush_buffers() -> None: + """ + Python's `print()` buffers output by default, typically waiting until ~8KB + accumulates. This method can be used to flush the buffers so we can see the output + of any print statements so far. + """ sys.stdout.flush() sys.stderr.flush() diff --git a/docs/README.md b/docs/README.md
index 0b2b910c73..7802d3c3ce 100644 --- a/docs/README.md +++ b/docs/README.md
@@ -63,6 +63,18 @@ mdbook serve The URL at which the docs can be viewed at will be logged. +## Synapse configuration documentation + +The [Configuration +Manual](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html) +page is generated from a YAML file, +[schema/synapse-config.schema.yaml](../schema/synapse-config.schema.yaml). To +add new options or modify existing ones, first edit that file, then run +[scripts-dev/gen_config_documentation.py](../scripts-dev/gen_config_documentation.py) +to generate an updated Configuration Manual markdown file. + +Build the book as described above to preview it in a web browser. + ## Configuration and theming The look and behaviour of the website is configured by the [book.toml](../book.toml) file diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index c50121d5f7..f91d290f2f 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md
@@ -27,8 +27,6 @@ - [User Authentication](usage/configuration/user_authentication/README.md) - [Single-Sign On](usage/configuration/user_authentication/single_sign_on/README.md) - [OpenID Connect](openid.md) - - [SAML](usage/configuration/user_authentication/single_sign_on/saml.md) - - [CAS](usage/configuration/user_authentication/single_sign_on/cas.md) - [SSO Mapping Providers](sso_mapping_providers.md) - [Password Auth Providers](password_auth_providers.md) - [JSON Web Tokens](jwt.md) @@ -49,11 +47,14 @@ - [Background update controller callbacks](modules/background_update_controller_callbacks.md) - [Account data callbacks](modules/account_data_callbacks.md) - [Add extra fields to client events unsigned section callbacks](modules/add_extra_fields_to_client_events_unsigned.md) + - [Media repository callbacks](modules/media_repository_callbacks.md) + - [Ratelimit callbacks](modules/ratelimit_callbacks.md) - [Porting a legacy module to the new interface](modules/porting_legacy_module.md) - [Workers](workers.md) - [Using `synctl` with Workers](synctl_workers.md) - [Systemd](systemd-with-workers/README.md) - [Administration](usage/administration/README.md) + - [Backups](usage/administration/backups.md) - [Admin API](usage/administration/admin_api/README.md) - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) @@ -65,6 +66,7 @@ - [Registration Tokens](usage/administration/admin_api/registration_tokens.md) - [Manipulate Room Membership](admin_api/room_membership.md) - [Rooms](admin_api/rooms.md) + - [Scheduled tasks](admin_api/scheduled_tasks.md) - [Server Notices](admin_api/server_notices.md) - [Statistics](admin_api/statistics.md) - [Users](admin_api/user_admin_api.md) @@ -102,9 +104,6 @@ - [TCP Replication](tcp_replication.md) - [Faster remote joins](development/synapse_architecture/faster_joins.md) - [Internal Documentation](development/internal_documentation/README.md) - - [Single Sign-On]() - - [SAML](development/saml.md) - - [CAS](development/cas.md) - [Room DAG concepts](development/room-dag-concepts.md) - [State Resolution]() - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md) diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md
index 83f7dc37f4..9075e92882 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md
@@ -60,10 +60,11 @@ paginate through. anything other than the return value of `next_token` from a previous call. Defaults to `0`. * `dir`: string - Direction of event report order. Whether to fetch the most recent first (`b`) or the oldest first (`f`). Defaults to `b`. -* `user_id`: string - Is optional and filters to only return users with user IDs that - contain this value. This is the user who reported the event and wrote the reason. -* `room_id`: string - Is optional and filters to only return rooms with room IDs that - contain this value. +* `user_id`: optional string - Filter by the user ID of the reporter. This is the user who reported the event + and wrote the reason. +* `room_id`: optional string - Filter by room ID. +* `event_sender_user_id`: optional string - Filter by the sender of the reported event. This is the user that + the report was made against. **Response** diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md
index ef1b58c9ba..e32728e56d 100644 --- a/docs/admin_api/experimental_features.md +++ b/docs/admin_api/experimental_features.md
@@ -5,6 +5,7 @@ basis. The currently supported features are: - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications for another client - [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support +- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): add `state_after` to sync v2 To use it, you will need to authenticate by providing an `access_token` for a server admin: see [Admin API](../usage/administration/admin_api/). diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
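As a usage sketch for the feature list above: per-user features can be toggled via the experimental features admin API. The endpoint path and body shape below are assumptions based on the rest of this page (not shown in this hunk), and the URL, user ID, and token are placeholders.

```python
# Hypothetical sketch: enable the MSC4222 feature for one user. The endpoint
# path and body shape are assumptions based on the rest of this page (not
# shown in this hunk); URL, user ID, and token are placeholders.
import requests

base_url = "https://synapse.example.com"  # placeholder
user_id = "@user:example.com"             # placeholder
access_token = "<admin access token>"     # placeholder

resp = requests.put(
    f"{base_url}/_synapse/admin/v1/experimental_features/{user_id}",
    json={"features": {"msc4222": True}},
    headers={"Authorization": f"Bearer {access_token}"},
)
resp.raise_for_status()
```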
index 30833f3109..1177711c1e 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md
@@ -46,6 +46,14 @@ to any local media, and any locally-cached copies of remote media. The media file itself (and any thumbnails) is not deleted from the server. +Since Synapse 1.128.0, hashes of uploaded media are tracked. If a piece of media +is quarantined, Synapse will: + + - Quarantine any media with a matching hash that has already been uploaded. + - Quarantine any future media uploads with a matching hash. + - Quarantine any existing cached remote media with a matching hash. + - Quarantine any future remote media with a matching hash. + ## Quarantining media by ID This API quarantines a single piece of local or remote media. diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 8e3a367e90..bdda9b47ad 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md
@@ -385,6 +385,13 @@ The API is: ``` GET /_synapse/admin/v1/rooms/<room_id>/state ``` +**Parameters** + +The following query parameter is available: + +* `type` - The type of room state event to filter by, e.g. "m.room.create". If provided, only state events + of this type will be returned (regardless of their `state_key` value); see the usage sketch after this section. + A response body like the following is returned: ```json @@ -787,6 +794,7 @@ A response body like the following is returned: "results": [ { "delete_id": "delete_id1", + "room_id": "!roomid:example.com", "status": "failed", "error": "error message", "shutdown_room": { @@ -797,6 +805,7 @@ } }, { "delete_id": "delete_id2", + "room_id": "!roomid:example.com", "status": "purging", "shutdown_room": { "kicked_users": [ @@ -835,6 +844,8 @@ A response body like the following is returned: ```json { "status": "purging", + "delete_id": "bHkCNQpHqOaFhPtK", + "room_id": "!roomid:example.com", "shutdown_room": { "kicked_users": [ "@foobar:example.com" @@ -862,7 +873,8 @@ The following fields are returned in the JSON response body: - `results` - An array of objects, each containing information about one task. This field is omitted from the result when you query by `delete_id`. Task objects contain the following fields: - - `delete_id` - The ID for this purge if you query by `room_id`. + - `delete_id` - The ID for this purge. + - `room_id` - The ID of the room being deleted. - `status` - The status will be one of: - `shutting_down` - The process is removing users from the room. - `purging` - The process is purging the room and event data from database. diff --git a/docs/admin_api/scheduled_tasks.md b/docs/admin_api/scheduled_tasks.md new file mode 100644
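A usage sketch for the new `type` filter on the room state endpoint above, using Python's `requests`; the URL, room ID, and token are placeholders, and the `state` response key follows the response example on that page.

```python
# Hypothetical example: fetch only m.room.member state events from a room
# using the new `type` query parameter. The URL, room ID, and token below
# are placeholders for your deployment.
import requests

base_url = "https://synapse.example.com"  # placeholder
room_id = "!roomid:example.com"           # placeholder
access_token = "<admin access token>"     # placeholder

resp = requests.get(
    f"{base_url}/_synapse/admin/v1/rooms/{room_id}/state",
    params={"type": "m.room.member"},
    headers={"Authorization": f"Bearer {access_token}"},
)
resp.raise_for_status()
print(len(resp.json()["state"]), "matching state events")
```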
index 0000000000..b80da5083c --- /dev/null +++ b/docs/admin_api/scheduled_tasks.md
@@ -0,0 +1,54 @@ +# Show scheduled tasks + +This API returns information about scheduled tasks. + +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api/). + +The API is: +``` +GET /_synapse/admin/v1/scheduled_tasks +``` + +It returns a JSON body like the following: + +```json +{ + "scheduled_tasks": [ + { + "id": "GSA124oegf1", + "action": "shutdown_room", + "status": "complete", + "timestamp_ms": 23423523, + "resource_id": "!roomid", + "result": "some result", + "error": null + } + ] +} +``` + +**Query parameters:** + +* `action_name`: optional string - Returns only the scheduled tasks with the given action name. +* `resource_id`: optional string - Returns only the scheduled tasks with the given resource ID. +* `status`: optional string - Returns only the scheduled tasks matching the given status, one of: + - "scheduled" - Task is scheduled but not active. + - "active" - Task is active and probably running; if not, it will be run on the next scheduler loop. + - "complete" - Task has completed successfully. + - "failed" - Task is over and either returned a failed status, or had an exception. + +* `max_timestamp`: optional int - Returns only the scheduled tasks with a timestamp earlier than the specified one. + +**Response** + +The following fields are returned in the JSON response body along with a `200` HTTP status code: + +* `id`: string - ID of the scheduled task. +* `action`: string - The name of the scheduled task's action. +* `status`: string - The status of the scheduled task. +* `timestamp_ms`: integer - The timestamp (in milliseconds since the unix epoch) of the given task. If the status is "scheduled", this represents when the task should be launched; + otherwise it represents the last time this task changed state. +* `resource_id`: optional string - The resource ID of the scheduled task, if it has one. +* `result`: optional JSON - Any result of the scheduled task, if given. +* `error`: optional string - If the task has the status "failed", the error associated with this failure. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
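A short usage sketch for the scheduled tasks endpoint documented above, filtering for failed tasks with Python's `requests`; the URL and token are placeholders.

```python
# Hypothetical example: list failed scheduled tasks via the admin API.
# The URL and token below are placeholders for your deployment.
import requests

base_url = "https://synapse.example.com"  # placeholder
access_token = "<admin access token>"     # placeholder

resp = requests.get(
    f"{base_url}/_synapse/admin/v1/scheduled_tasks",
    params={"status": "failed"},
    headers={"Authorization": f"Bearer {access_token}"},
)
resp.raise_for_status()
for task in resp.json()["scheduled_tasks"]:
    print(task["id"], task["action"], task["error"])
```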
index 2281385830..d526072d2f 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md
@@ -19,20 +19,6 @@ It returns a JSON body like the following: { "name": "@user:example.com", "displayname": "User", // can be null if not set - "threepids": [ - { - "medium": "email", - "address": "<user_mail_1>", - "added_at": 1586458409743, - "validated_at": 1586458409743 - }, - { - "medium": "email", - "address": "<user_mail_2>", - "added_at": 1586458409743, - "validated_at": 1586458409743 - } - ], "avatar_url": "<avatar_url>", // can be null if not set "is_guest": 0, "admin": 0, @@ -40,6 +26,7 @@ "erased": false, "shadow_banned": 0, "creation_ts": 1560432506, + "last_seen_ts": 1732919539393, "appservice_id": null, "consent_server_notice_sent": null, "consent_version": null, @@ -55,7 +42,8 @@ It returns a JSON body like the following: } ], "user_type": null, - "locked": false + "locked": false, + "suspended": false } ``` @@ -82,16 +70,6 @@ with a body of: "logout_devices": false, "displayname": "Alice Marigold", "avatar_url": "mxc://example.com/abcde12345", - "threepids": [ - { - "medium": "email", - "address": "alice@example.com" - }, - { - "medium": "email", - "address": "alice@domain.org" - } - ], "external_ids": [ { "auth_provider": "example", @@ -128,15 +106,6 @@ Body parameters: - `avatar_url` - **string**, optional. Must be a [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris). If set to an empty string (`""`), the user's avatar is removed. -- `threepids` - **array**, optional. If provided, the user's third-party IDs (email, msisdn) are - entirely replaced with the given list. Each item in the array is an object with the following - fields: - - `medium` - **string**, required. The type of third-party ID, either `email` or `msisdn` (phone number). - - `address` - **string**, required. The third-party ID itself, e.g. `alice@example.com` for `email` or - `447470274584` (for a phone number with country code "44") and `19254857364` (for a phone number - with country code "1") for `msisdn`. - Note: If a threepid is removed from a user via this option, Synapse will also attempt to remove - that threepid from any identity servers it is aware has a binding for it. - `external_ids` - **array**, optional. Allow setting the identifier of the external identity provider for SSO (Single sign-on). More details are in the configuration manual under the sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers). @@ -161,7 +130,8 @@ Body parameters: - `locked` - **bool**, optional. If unspecified, locked state will be left unchanged. - `user_type` - **string** or null, optional. If not provided, the user type will be not be changed. If `null` is given, the user type will be cleared. - Other allowed options are: `bot` and `support`. + Other allowed options are: `bot` and `support`, and any extra values defined in the homeserver + [configuration](../usage/configuration/config_documentation.md#user_types). ## List Accounts ### List Accounts (V2) @@ -412,6 +382,32 @@ The following actions are **NOT** performed. The list may be incomplete. - Remove from monthly active users - Remove user's consent information (consent version and timestamp) +## Suspend/Unsuspend Account + +This API allows an admin to suspend/unsuspend an account. While an account is suspended, the user is +prohibited from sending invites, joining or knocking on rooms, sending messages, changing profile data, and redacting messages other than their own. 
+ +The API is: + +``` +PUT /_synapse/admin/v1/suspend/<user_id> +``` + +with a body of: + +```json +{ + "suspend": true +} +``` + +To unsuspend a user, use the same endpoint with a body of: +```json +{ + "suspend": false +} +``` + ## Reset password **Note:** This API is disabled when MSC3861 is enabled. [See #15582](https://github.com/matrix-org/synapse/pull/15582) @@ -476,9 +472,9 @@ with a body of: } ``` -## List room memberships of a user +## List joined rooms of a user -Gets a list of all `room_id` that a specific `user_id` is member. +Gets a list of all `room_id`s that a specific `user_id` is joined to and participating in. The API is: @@ -515,6 +511,73 @@ The following fields are returned in the JSON response body: - `joined_rooms` - An array of `room_id`. - `total` - Number of rooms. +## Get the number of invites sent by the user + +Fetches the number of invites sent by the provided user ID across all rooms +after the given timestamp. + +``` +GET /_synapse/admin/v1/users/$user_id/sent_invite_count +``` + +**Parameters** + +The following parameters should be set in the URL: + +* `user_id`: fully qualified: for example, `@user:server.com` + +The following should be set as query parameters in the URL: + +* `from_ts`: int, required. A timestamp in ms from the unix epoch. Only + invites sent at or after the provided timestamp will be returned. + This works by comparing the provided timestamp to the `received_ts` + column in the `events` table. + Note: https://currentmillis.com/ is a useful tool for converting dates + into timestamps and vice versa. + +A response body like the following is returned: + +```json +{ + "invite_count": 30 +} +``` + +_Added in Synapse 1.122.0_ + +## Get the cumulative number of rooms a user has joined after a given timestamp + +Fetches the number of rooms that the user joined after the given timestamp, even +if they have subsequently left/been banned from those rooms. + +``` +GET /_synapse/admin/v1/users/$user_id/cumulative_joined_room_count +``` + +**Parameters** + +The following parameters should be set in the URL: + +* `user_id`: fully qualified: for example, `@user:server.com` + +The following should be set as query parameters in the URL: + +* `from_ts`: int, required. A timestamp in ms from the unix epoch. Only + rooms joined at or after the provided timestamp will be counted. + This works by comparing the provided timestamp to the `received_ts` + column in the `events` table. + Note: https://currentmillis.com/ is a useful tool for converting dates + into timestamps and vice versa. + +A response body like the following is returned: + +```json +{ + "cumulative_joined_room_count": 30 +} +``` +_Added in Synapse 1.122.0_ + ## Account Data Gets information about account data for a specific `user_id`. 
@@ -859,7 +922,8 @@ A response body like the following is returned: "last_seen_ip": "1.2.3.4", "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0", "last_seen_ts": 1474491775024, - "user_id": "<user_id>" + "user_id": "<user_id>", + "dehydrated": false }, { "device_id": "AUIECTSRND", @@ -867,7 +931,8 @@ A response body like the following is returned: "last_seen_ip": "1.2.3.5", "last_seen_user_agent": "Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0", "last_seen_ts": 1474491775025, - "user_id": "<user_id>" + "user_id": "<user_id>", + "dehydrated": false } ], "total": 2 @@ -897,6 +962,7 @@ The following fields are returned in the JSON response body: - `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this devices was last seen. (May be a few minutes out of date, for efficiency reasons). - `user_id` - Owner of device. + - `dehydrated` - Whether the device is a dehydrated device. - `total` - Total number of user's devices. @@ -1306,7 +1372,7 @@ When a user matched the given ID for the given provider, an HTTP code `200` with The following parameters should be set in the URL: - `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method. -- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers. +- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers. The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters. @@ -1324,19 +1390,28 @@ Returns a `404` HTTP status code if no user was found, with a response body like _Added in Synapse 1.68.0._ -## Find a user based on their Third Party ID (ThreePID or 3PID) +## Redact all the events of a user -The API is: +This endpoint allows an admin to redact the events of a given user. There are no restrictions on redactions for a +local user. By default, we puppet the user who sent the message to redact it themselves. Redactions for non-local users are issued using the admin user, and will fail in rooms where the admin user is not an admin or does not have the power level required to issue redactions. +The API is: ``` -GET /_synapse/admin/v1/threepid/$medium/users/$address +POST /_synapse/admin/v1/user/$user_id/redact + +{ + "rooms": ["!roomid1", "!roomid2"] +} ``` +If an empty list is provided as the value of `rooms`, all events in all the rooms the user is a member of will be redacted; +otherwise, all the events in the rooms provided in the request will be redacted. -When a user matched the given address for the given medium, an HTTP code `200` with a response body like the following is returned: +The API starts the redaction process running, and returns immediately with a JSON body containing +a redact ID, which can be used to query the status of the redaction process: ```json { - "user_id": "@hello:example.org" + "redact_id": "<opaque id>" } ``` @@ -1344,20 +1419,57 @@ When a user matched the given address for the given medium, an HTTP code `200` w The following parameters should be set in the URL: -- `medium` - Kind of third-party ID, either `email` or `msisdn`. -- `address` - Value of the third-party ID. 
+- `user_id` - The fully qualified MXID of the user: for example, `@user:server.com`. -The `address` may have characters that are not URL-safe, so it is advised to URL-encode those parameters. +The following JSON body parameter must be provided: -**Errors** +- `rooms` - A list of rooms to redact the user's events in. If an empty list is provided, all events in all rooms + the user is a member of will be redacted. -Returns a `404` HTTP status code if no user was found, with a response body like this: +The following JSON body parameters are optional: -```json +- `reason` - Reason the redaction is being requested, e.g. "spam", "abuse", etc. This will be included in each redaction event, and be visible to users. +- `limit` - A limit on the number of the user's events to search for redactable events in each room (events are redacted newest to oldest). Defaults to 1000 if not provided. + +_Added in Synapse 1.116.0._ + + +## Check the status of a redaction process + +It is possible to query the status of the background task for redacting a user's events. +The status can be queried up to 24 hours after completion of the task, +or until Synapse is restarted (whichever happens first). + +The API is: + +``` +GET /_synapse/admin/v1/user/redact_status/$redact_id +``` + +A response body like the following is returned: + +```json { - "errcode":"M_NOT_FOUND", - "error":"User not found" + "status": "active", + "failed_redactions": [] } ``` -_Added in Synapse 1.72.0._ +**Parameters** + +The following parameters should be set in the URL: + +* `redact_id` - string - The ID for this redaction process, provided when the redaction was requested. + + +**Response** + +The following fields are returned in the JSON response body: + +- `status` - string - one of `scheduled`/`active`/`completed`/`failed`, indicating the status of the redaction job. +- `failed_redactions` - dictionary - the keys of the dict are the event IDs the process was unable to redact, if any, and the values are + the corresponding errors that caused those redactions to fail. + +_Added in Synapse 1.116.0._ + + diff --git a/docs/changelogs/CHANGES-2023.md b/docs/changelogs/CHANGES-2023.md new file mode 100644
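Putting the two redaction endpoints above together, a sketch of requesting a redaction and polling its status with Python's `requests`; the URL, user ID, and token are placeholders, and the polling interval is arbitrary.

```python
# Hypothetical sketch: redact a user's events in all their rooms, then poll
# the status endpoint until the background task finishes. The URL, user ID,
# and token are placeholders; the 5s polling interval is arbitrary.
import time

import requests

base_url = "https://synapse.example.com"  # placeholder
user_id = "@user:example.com"             # placeholder
access_token = "<admin access token>"     # placeholder
headers = {"Authorization": f"Bearer {access_token}"}

resp = requests.post(
    f"{base_url}/_synapse/admin/v1/user/{user_id}/redact",
    json={"rooms": [], "reason": "spam"},  # empty list = all joined rooms
    headers=headers,
)
resp.raise_for_status()
redact_id = resp.json()["redact_id"]

while True:
    status = requests.get(
        f"{base_url}/_synapse/admin/v1/user/redact_status/{redact_id}",
        headers=headers,
    ).json()
    if status["status"] in ("completed", "failed"):
        break
    time.sleep(5)

print(status["status"], status["failed_redactions"])
```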
index 0000000000..9b6ad3de1b --- /dev/null +++ b/docs/changelogs/CHANGES-2023.md
@@ -0,0 +1,2202 @@ +# Synapse 1.98.0 (2023-12-12) + +Synapse 1.98.0 will be the last Synapse release in 2023; the regular release cadence will resume in January 2024. + +Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for +proprietary dual licensing). You can read more about this here: + + - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/ + - https://element.io/blog/element-to-adopt-agplv3/ + +The Matrix.org Foundation copy of the project will be archived. Any changes needed +by server administrators will be communicated via our usual announcements channels, +but we are striving to make this as seamless as possible. + + +No significant changes since 1.98.0rc1. + + + +# Synapse 1.98.0rc1 (2023-12-05) + +### Features + +- Synapse now declares support for Matrix v1.7, v1.8, and v1.9. ([\#16707](https://github.com/matrix-org/synapse/issues/16707)) +- Add `on_user_login` [module API](https://matrix-org.github.io/synapse/latest/modules/writing_a_module.html) callback for when a user logs in. ([\#15207](https://github.com/matrix-org/synapse/issues/15207)) +- Support [MSC4069: Inhibit profile propagation](https://github.com/matrix-org/matrix-spec-proposals/pull/4069). ([\#16636](https://github.com/matrix-org/synapse/issues/16636)) +- Restore tracking of requests and monthly active users when delegating authentication via [MSC3861](https://github.com/matrix-org/synapse/pull/16672) to an OIDC provider. ([\#16672](https://github.com/matrix-org/synapse/issues/16672)) +- Add an autojoin setting for server notices rooms, so users may be joined directly instead of receiving an invite. ([\#16699](https://github.com/matrix-org/synapse/issues/16699)) +- Follow redirects when downloading media over federation (per [MSC3860](https://github.com/matrix-org/matrix-spec-proposals/pull/3860)). ([\#16701](https://github.com/matrix-org/synapse/issues/16701)) + +### Bugfixes + +- Enable refreshable tokens on the admin registration endpoint. ([\#16642](https://github.com/matrix-org/synapse/issues/16642)) +- Consistently bypass rate limits when using the server notice admin API. ([\#16670](https://github.com/matrix-org/synapse/issues/16670)) +- Fix a bug introduced in Synapse 1.7.2 where rooms whose power levels lacked an `events` field could not be upgraded. ([\#16725](https://github.com/matrix-org/synapse/issues/16725)) +- Fix `GET /_synapse/admin/v1/federation/destinations` [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) returning null (instead of 0) for `retry_last_ts` and `retry_interval`. ([\#16729](https://github.com/matrix-org/synapse/issues/16729)) + +### Improved Documentation + +- Add schema rollback information to documentation. ([\#16661](https://github.com/matrix-org/synapse/issues/16661)) +- Fix poetry version typo in the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16695](https://github.com/matrix-org/synapse/issues/16695)) +- Switch the example UNIX socket paths to `/run`. Add HAProxy example configuration for UNIX sockets. ([\#16700](https://github.com/matrix-org/synapse/issues/16700)) +- Add documentation for how to validate the configuration file with `synapse.config` script. ([\#16714](https://github.com/matrix-org/synapse/issues/16714)) + +### Internal Changes + +- Clean-up unused tables. ([\#16522](https://github.com/matrix-org/synapse/issues/16522)) +- Reduce a little database load while processing state auth chains. 
([\#16552](https://github.com/matrix-org/synapse/issues/16552)) +- Reduce database load of pruning old `user_ips`. ([\#16667](https://github.com/matrix-org/synapse/issues/16667)) +- Reduce DB load when forget on leave setting is disabled. ([\#16668](https://github.com/matrix-org/synapse/issues/16668)) +- Ignore `encryption_enabled_by_default_for_room_type` setting when creating server notices room, since the notices will be sent unencrypted anyway. ([\#16677](https://github.com/matrix-org/synapse/issues/16677)) +- Correctly read the to-device stream ID on startup using SQLite. ([\#16682](https://github.com/matrix-org/synapse/issues/16682)) +- Reorganise test files. ([\#16684](https://github.com/matrix-org/synapse/issues/16684)) +- Remove old full schema dumps which are no longer used. ([\#16697](https://github.com/matrix-org/synapse/issues/16697)) +- Raise poetry-core upper bound to <=1.8.1. This allows contributors to import Synapse after `poetry install`ing with Poetry 1.6 and above. Contributed by Mo Balaa. ([\#16702](https://github.com/matrix-org/synapse/issues/16702)) +- Add a workflow to try and automatically fixup linting in a PR. ([\#16704](https://github.com/matrix-org/synapse/issues/16704)) + + +### Updates to locked dependencies + +* Bump cryptography from 41.0.5 to 41.0.6. ([\#16703](https://github.com/matrix-org/synapse/issues/16703)) +* Bump cryptography from 41.0.6 to 41.0.7. ([\#16721](https://github.com/matrix-org/synapse/issues/16721)) +* Bump idna from 3.4 to 3.6. ([\#16720](https://github.com/matrix-org/synapse/issues/16720)) +* Bump jsonschema from 4.19.1 to 4.20.0. ([\#16692](https://github.com/matrix-org/synapse/issues/16692)) +* Bump matrix-org/netlify-pr-preview from 2 to 3. ([\#16719](https://github.com/matrix-org/synapse/issues/16719)) +* Bump phonenumbers from 8.13.23 to 8.13.26. ([\#16722](https://github.com/matrix-org/synapse/issues/16722)) +* Bump prometheus-client from 0.18.0 to 0.19.0. ([\#16691](https://github.com/matrix-org/synapse/issues/16691)) +* Bump pyasn1 from 0.5.0 to 0.5.1. ([\#16689](https://github.com/matrix-org/synapse/issues/16689)) +* Bump pydantic from 2.4.2 to 2.5.1. ([\#16663](https://github.com/matrix-org/synapse/issues/16663)) +* Bump pyo3 (0.19.2→0.20.0), pythonize (0.19.0→0.20.0) and pyo3-log (0.8.1→0.9.0). ([\#16673](https://github.com/matrix-org/synapse/issues/16673)) +* Bump pyopenssl from 23.2.0 to 23.3.0. ([\#16662](https://github.com/matrix-org/synapse/issues/16662)) +* Bump ruff from 0.1.4 to 0.1.6. ([\#16690](https://github.com/matrix-org/synapse/issues/16690)) +* Bump sentry-sdk from 1.32.0 to 1.35.0. ([\#16666](https://github.com/matrix-org/synapse/issues/16666)) +* Bump serde from 1.0.192 to 1.0.193. ([\#16693](https://github.com/matrix-org/synapse/issues/16693)) +* Bump sphinx-autodoc2 from 0.4.2 to 0.5.0. ([\#16723](https://github.com/matrix-org/synapse/issues/16723)) +* Bump types-jsonschema from 4.19.0.4 to 4.20.0.0. ([\#16724](https://github.com/matrix-org/synapse/issues/16724)) +* Bump types-pillow from 10.1.0.0 to 10.1.0.2. ([\#16664](https://github.com/matrix-org/synapse/issues/16664)) +* Bump types-psycopg2 from 2.9.21.15 to 2.9.21.16. ([\#16665](https://github.com/matrix-org/synapse/issues/16665)) +* Bump types-setuptools from 68.2.0.0 to 68.2.0.2. ([\#16688](https://github.com/matrix-org/synapse/issues/16688)) + +# Synapse 1.97.0 (2023-11-28) + +Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for +proprietary dual licensing). 
+
+ - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/
+ - https://element.io/blog/element-to-adopt-agplv3/
+
+The Matrix.org Foundation copy of the project will be archived. Any changes needed
+by server administrators will be communicated via our usual announcement channels,
+but we are striving to make this as seamless as possible.
+
+
+No significant changes since 1.97.0rc1.
+
+
+# Synapse 1.97.0rc1 (2023-11-21)
+
+### Features
+
+- Add support for asynchronous uploads as defined by [MSC2246](https://github.com/matrix-org/matrix-spec-proposals/pull/2246); see the sketch after this list. Contributed by @sumnerevans at @beeper. ([\#15503](https://github.com/matrix-org/synapse/issues/15503))
+- Improve the performance of some operations in multi-worker deployments. ([\#16613](https://github.com/matrix-org/synapse/issues/16613), [\#16616](https://github.com/matrix-org/synapse/issues/16616))
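+
+A minimal client-side sketch of the two-step flow (homeserver URL and access token are placeholders; the endpoints are those specified by MSC2246 / Matrix v1.7):
+
+```python
+import requests  # any HTTP client works; requests is used for brevity
+
+BASE = "https://homeserver.example.com"  # placeholder
+AUTH = {"Authorization": "Bearer <access token>"}  # placeholder
+
+# 1. Reserve a media ID up front.
+created = requests.post(f"{BASE}/_matrix/media/v1/create", headers=AUTH)
+created.raise_for_status()
+mxc = created.json()["content_uri"]  # e.g. mxc://example.com/AQwafuaFswefuhsf
+server_name, media_id = mxc[len("mxc://"):].split("/", 1)
+
+# 2. The mxc:// URI can be referenced in events immediately; the bytes
+#    are uploaded asynchronously to the reserved ID.
+with open("picture.png", "rb") as f:
+    upload = requests.put(
+        f"{BASE}/_matrix/media/v3/upload/{server_name}/{media_id}",
+        headers={**AUTH, "Content-Type": "image/png"},
+        data=f,
+    )
+upload.raise_for_status()
+```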
+
+### Bugfixes
+
+- Fix a long-standing bug where some queries updated the same row twice. Introduced in Synapse 1.57.0. ([\#16609](https://github.com/matrix-org/synapse/issues/16609))
+- Fix a long-standing bug where Synapse would not unbind third-party identifiers for Application Service users when deactivated and would not emit a compliant response. ([\#16617](https://github.com/matrix-org/synapse/issues/16617))
+- Fix sending out-of-order `POSITION` over replication, causing additional database load. ([\#16639](https://github.com/matrix-org/synapse/issues/16639))
+
+### Improved Documentation
+
+- Note that the option [`outbound_federation_restricted_to`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#outbound_federation_restricted_to) was added in Synapse 1.89.0, and fix a nearby formatting error. ([\#16628](https://github.com/matrix-org/synapse/issues/16628))
+- Update parameter information for the `/timestamp_to_event` admin API. ([\#16631](https://github.com/matrix-org/synapse/issues/16631))
+- Provide an example for a common encrypted media response from the admin user media API and mention possible null values. ([\#16654](https://github.com/matrix-org/synapse/issues/16654))
+
+### Internal Changes
+
+- Remove whole table locks on push rule modifications. Contributed by Nick @ Beeper (@fizzadar). ([\#16051](https://github.com/matrix-org/synapse/issues/16051))
+- Support reactor tick timings on more types of event loops. ([\#16532](https://github.com/matrix-org/synapse/issues/16532))
+- Improve type hints. ([\#16564](https://github.com/matrix-org/synapse/issues/16564), [\#16611](https://github.com/matrix-org/synapse/issues/16611), [\#16612](https://github.com/matrix-org/synapse/issues/16612))
+- Avoid executing no-op queries. ([\#16583](https://github.com/matrix-org/synapse/issues/16583))
+- Simplify persistence code to be per-room. ([\#16584](https://github.com/matrix-org/synapse/issues/16584))
+- Use standard SQL helpers in persistence code. ([\#16585](https://github.com/matrix-org/synapse/issues/16585))
+- Avoid updating the stream cache unnecessarily. ([\#16586](https://github.com/matrix-org/synapse/issues/16586))
+- Improve performance when using opentracing. ([\#16589](https://github.com/matrix-org/synapse/issues/16589))
+- Run push rule evaluator setup in parallel. ([\#16590](https://github.com/matrix-org/synapse/issues/16590))
+- Improve tests of the SQL generator. ([\#16596](https://github.com/matrix-org/synapse/issues/16596))
+- Use more generic database methods. ([\#16615](https://github.com/matrix-org/synapse/issues/16615))
+- Use `dbname` instead of the deprecated `database` connection parameter for psycopg2. ([\#16618](https://github.com/matrix-org/synapse/issues/16618))
+- Add an internal [Admin API endpoint](https://matrix-org.github.io/synapse/v1.97/usage/configuration/config_documentation.html#allow-replacing-master-cross-signing-key-without-user-interactive-auth) to temporarily grant the ability to update an existing cross-signing key without UIA. ([\#16634](https://github.com/matrix-org/synapse/issues/16634))
+- Improve references to GitHub issues. ([\#16637](https://github.com/matrix-org/synapse/issues/16637), [\#16638](https://github.com/matrix-org/synapse/issues/16638))
+- More efficiently handle no-op `POSITION` over replication. ([\#16640](https://github.com/matrix-org/synapse/issues/16640), [\#16655](https://github.com/matrix-org/synapse/issues/16655))
+- Speed up deleting of device messages when deleting a device. ([\#16643](https://github.com/matrix-org/synapse/issues/16643))
+- Speed up persisting a large number of outliers. ([\#16649](https://github.com/matrix-org/synapse/issues/16649))
+- Reduce max concurrency of background tasks, reducing potential max DB load. ([\#16656](https://github.com/matrix-org/synapse/issues/16656), [\#16660](https://github.com/matrix-org/synapse/issues/16660))
+- Speed up room purges by adding an index to `event_push_summary`. ([\#16657](https://github.com/matrix-org/synapse/issues/16657))
+
+### Updates to locked dependencies
+
+* Bump prometheus-client from 0.17.1 to 0.18.0. ([\#16626](https://github.com/matrix-org/synapse/issues/16626))
+* Bump pyicu from 2.11 to 2.12. ([\#16603](https://github.com/matrix-org/synapse/issues/16603))
+* Bump requests-toolbelt from 0.10.1 to 1.0.0. ([\#16659](https://github.com/matrix-org/synapse/issues/16659))
+* Bump ruff from 0.0.292 to 0.1.4. ([\#16600](https://github.com/matrix-org/synapse/issues/16600))
+* Bump serde from 1.0.190 to 1.0.192. ([\#16627](https://github.com/matrix-org/synapse/issues/16627))
+* Bump serde_json from 1.0.107 to 1.0.108. ([\#16604](https://github.com/matrix-org/synapse/issues/16604))
+* Bump setuptools-rust from 1.8.0 to 1.8.1. ([\#16601](https://github.com/matrix-org/synapse/issues/16601))
+* Bump towncrier from 23.6.0 to 23.11.0. ([\#16622](https://github.com/matrix-org/synapse/issues/16622))
+* Bump treq from 22.2.0 to 23.11.0. ([\#16623](https://github.com/matrix-org/synapse/issues/16623))
+* Bump twisted from 23.8.0 to 23.10.0. ([\#16588](https://github.com/matrix-org/synapse/issues/16588))
+* Bump types-bleach from 6.1.0.0 to 6.1.0.1. ([\#16624](https://github.com/matrix-org/synapse/issues/16624))
+* Bump types-jsonschema from 4.19.0.3 to 4.19.0.4. ([\#16599](https://github.com/matrix-org/synapse/issues/16599))
+* Bump types-pyopenssl from 23.2.0.2 to 23.3.0.0. ([\#16625](https://github.com/matrix-org/synapse/issues/16625))
+* Bump types-pyyaml from 6.0.12.11 to 6.0.12.12. ([\#16602](https://github.com/matrix-org/synapse/issues/16602))
+
+
+# Synapse 1.96.1 (2023-11-17)
+
+Synapse will soon be forked by Element under an AGPLv3.0 licence (with CLA, for
+proprietary dual licensing). You can read more about this here:
+
+* https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/
+* https://element.io/blog/element-to-adopt-agplv3/
+
+The Matrix.org Foundation copy of the project will be archived. Any changes needed
+by server administrators will be communicated via our usual
+[announcement channels](https://matrix.to/#/#homeowners:matrix.org), but we are
+striving to make this as seamless as possible.
+
+This minor release was needed only because of CI-related trouble on [v1.96.0](https://github.com/matrix-org/synapse/releases/tag/v1.96.0), which was never released.
+
+### Internal Changes
+
+- Fix building of wheels in CI. ([\#16653](https://github.com/matrix-org/synapse/issues/16653))
+
+
+# Synapse 1.96.0 (2023-11-16)
+
+### Bugfixes
+
+- Fix "'int' object is not iterable" error in the `set_device_id_for_pushers` background update introduced in Synapse 1.95.0. ([\#16594](https://github.com/matrix-org/synapse/issues/16594))
+
+
+# Synapse 1.96.0rc1 (2023-10-31)
+
+### Features
+
+- Add experimental support to allow multiple workers to write to the receipts stream. ([\#16432](https://github.com/matrix-org/synapse/issues/16432))
+- Add a new module API for controlling presence. ([\#16544](https://github.com/matrix-org/synapse/issues/16544))
+- Add a new module API callback that allows adding extra fields to events' unsigned section when sent down to clients. ([\#16549](https://github.com/matrix-org/synapse/issues/16549))
+- Improve the performance of claiming encryption keys. ([\#16565](https://github.com/matrix-org/synapse/issues/16565), [\#16570](https://github.com/matrix-org/synapse/issues/16570))
+
+### Bugfixes
+
+- Fix a bug in the example Grafana dashboard that prevented it from finding the correct datasource. Contributed by @MichaelSasser. ([\#16471](https://github.com/matrix-org/synapse/issues/16471))
+- Fix a long-standing, exceedingly rare edge case where the first event persisted by a new event persister worker might not be sent down `/sync`. ([\#16473](https://github.com/matrix-org/synapse/issues/16473), [\#16557](https://github.com/matrix-org/synapse/issues/16557), [\#16561](https://github.com/matrix-org/synapse/issues/16561), [\#16578](https://github.com/matrix-org/synapse/issues/16578), [\#16580](https://github.com/matrix-org/synapse/issues/16580))
+- Fix a long-standing bug where `/sync` incorrectly did not mark a room as `limited` in sync responses when there were missing remote events. ([\#16485](https://github.com/matrix-org/synapse/issues/16485))
+- Fix a bug introduced in Synapse 1.41 where HTTP(S) forward proxy authorization would fail when using basic HTTP authentication with a long `username:password` string; see the sketch after this list. ([\#16504](https://github.com/matrix-org/synapse/issues/16504))
+- Force TLS certificate verification in the user registration script. ([\#16530](https://github.com/matrix-org/synapse/issues/16530))
+- Fix a long-standing bug where `/sync` could tightloop after restart when using SQLite. ([\#16540](https://github.com/matrix-org/synapse/issues/16540))
+- Fix ratelimiting of message sending when using workers, where the ratelimit would only be applied after most of the work had been done. ([\#16558](https://github.com/matrix-org/synapse/issues/16558))
+- Fix a long-standing bug where invited/knocking users would not leave during a room purge. ([\#16559](https://github.com/matrix-org/synapse/issues/16559))
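+
+A self-contained Python sketch of the likely failure mode with long credentials (an assumption about the root cause, not a quote of the fix): `base64.encodebytes` wraps its output with newlines every 76 characters, which corrupts an HTTP `Proxy-Authorization` header, whereas `base64.b64encode` emits a single unbroken token.
+
+```python
+import base64
+
+creds = ("user:" + "x" * 100).encode()  # a long username:password string
+
+# encodebytes() inserts a newline every 76 output characters, so long
+# credentials produce a multi-line value that is invalid in a header.
+wrapped = base64.encodebytes(creds)
+assert b"\n" in wrapped.strip()
+
+# b64encode() produces one unbroken token, safe for
+# "Proxy-Authorization: Basic <token>".
+flat = base64.b64encode(creds)
+assert b"\n" not in flat
+```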
+
+### Improved Documentation
+
+- Improve documentation of the presence router. ([\#16529](https://github.com/matrix-org/synapse/issues/16529))
+- Add a sentence to the [opentracing docs](https://matrix-org.github.io/synapse/latest/opentracing.html) on how Jaeger can be hosted in a different place than Synapse. ([\#16531](https://github.com/matrix-org/synapse/issues/16531))
+- Correctly describe the meaning of unspecified rule lists in the [`alias_creation_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#alias_creation_rules) and [`room_list_publication_rules`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#room_list_publication_rules) config options and improve their descriptions more generally. ([\#16541](https://github.com/matrix-org/synapse/issues/16541))
+- Pin the recommended poetry version in the [contributors' guide](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html). ([\#16550](https://github.com/matrix-org/synapse/issues/16550))
+- Fix a broken link to the [client breakdown](https://matrix.org/ecosystem/clients/) in the README. ([\#16569](https://github.com/matrix-org/synapse/issues/16569))
+
+### Internal Changes
+
+- Improve performance of the delete device messages query (see issue [\#16479](https://github.com/matrix-org/synapse/issues/16479)). ([\#16492](https://github.com/matrix-org/synapse/issues/16492))
+- Reduce memory allocations. ([\#16505](https://github.com/matrix-org/synapse/issues/16505))
+- Improve replication performance when purging rooms. ([\#16510](https://github.com/matrix-org/synapse/issues/16510))
+- Run tests against Python 3.12. ([\#16511](https://github.com/matrix-org/synapse/issues/16511))
+- Run trial & integration tests in continuous integration when the `.ci` directory is modified. ([\#16512](https://github.com/matrix-org/synapse/issues/16512))
+- Remove duplicate call to mark remote server 'awake' when using a federation sending worker. ([\#16515](https://github.com/matrix-org/synapse/issues/16515))
+- Enable dirty runs on Complement CI, which is significantly faster. ([\#16520](https://github.com/matrix-org/synapse/issues/16520))
+- Stop deleting from an unused table. ([\#16521](https://github.com/matrix-org/synapse/issues/16521))
+- Improve type hints. ([\#16526](https://github.com/matrix-org/synapse/issues/16526), [\#16551](https://github.com/matrix-org/synapse/issues/16551))
+- Fix running unit tests on Twisted trunk. ([\#16528](https://github.com/matrix-org/synapse/issues/16528))
+- Reduce some spurious logging in worker mode. ([\#16555](https://github.com/matrix-org/synapse/issues/16555))
+- Stop porting a table in the port-DB script that we're going to drop and rebuild anyway. ([\#16563](https://github.com/matrix-org/synapse/issues/16563))
+- Deal with warnings from running complement in CI. ([\#16567](https://github.com/matrix-org/synapse/issues/16567))
+- Allow building with `setuptools_rust` 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574))
+
+### Updates to locked dependencies
+
+* Bump black from 23.10.0 to 23.10.1. ([\#16575](https://github.com/matrix-org/synapse/issues/16575))
+* Bump black from 23.9.1 to 23.10.0. ([\#16538](https://github.com/matrix-org/synapse/issues/16538))
+* Bump cryptography from 41.0.4 to 41.0.5. ([\#16572](https://github.com/matrix-org/synapse/issues/16572))
+* Bump gitpython from 3.1.37 to 3.1.40. ([\#16534](https://github.com/matrix-org/synapse/issues/16534))
+* Bump matrix-synapse-ldap3 from 0.2.2 to 0.3.0. ([\#16539](https://github.com/matrix-org/synapse/issues/16539))
+* Bump phonenumbers from 8.13.22 to 8.13.23. ([\#16576](https://github.com/matrix-org/synapse/issues/16576))
+* Bump pygithub from 1.59.1 to 2.1.1. ([\#16535](https://github.com/matrix-org/synapse/issues/16535))
+* Bump serde from 1.0.189 to 1.0.190. ([\#16577](https://github.com/matrix-org/synapse/issues/16577))
+* Bump setuptools-rust from 1.7.0 to 1.8.0. ([\#16574](https://github.com/matrix-org/synapse/issues/16574))
+* Bump types-pillow from 10.0.0.3 to 10.1.0.0. ([\#16536](https://github.com/matrix-org/synapse/issues/16536))
+* Bump types-psycopg2 from 2.9.21.14 to 2.9.21.15. ([\#16573](https://github.com/matrix-org/synapse/issues/16573))
+* Bump types-requests from 2.31.0.2 to 2.31.0.10. ([\#16537](https://github.com/matrix-org/synapse/issues/16537))
+* Bump urllib3 from 1.26.17 to 1.26.18. ([\#16516](https://github.com/matrix-org/synapse/issues/16516))
+
+
+# Synapse 1.95.1 (2023-10-31)
+
+## Security advisory
+
+The following issue is fixed in 1.95.1.
+
+- [GHSA-mp92-3jfm-3575](https://github.com/matrix-org/synapse/security/advisories/GHSA-mp92-3jfm-3575) / [CVE-2023-43796](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-43796) — Moderate Severity
+
+  Cached device information of remote users can be queried from Synapse. This can be used to enumerate the remote users known to a homeserver.
+
+See the advisory for more details. If you have any questions, email security@matrix.org.
+
+
+# Synapse 1.95.0 (2023-10-24)
+
+### Internal Changes
+
+- Build Debian packages for [Ubuntu 23.10 Mantic Minotaur](https://canonical.com/blog/canonical-releases-ubuntu-23-10-mantic-minotaur). ([\#16524](https://github.com/matrix-org/synapse/issues/16524))
+
+
+# Synapse 1.95.0rc1 (2023-10-17)
+
+### Bugfixes
+
+- Remove the legacy unspecced `knock_state_events` field returned in some responses. ([\#16403](https://github.com/matrix-org/synapse/issues/16403))
+- Fix a bug introduced in Synapse 1.81.0 where an `AttributeError` would be raised when `_matrix/client/v3/account/whoami` is called over a Unix socket. Contributed by @Sir-Photch. ([\#16404](https://github.com/matrix-org/synapse/issues/16404))
+- Properly return inline media when content types have parameters. ([\#16440](https://github.com/matrix-org/synapse/issues/16440))
+- Prevent the purging of large rooms from timing out when Postgres is in use. The timeout which causes this issue was introduced in Synapse 1.88.0. ([\#16455](https://github.com/matrix-org/synapse/issues/16455))
+- Improve the performance of purging rooms, particularly encrypted rooms. ([\#16457](https://github.com/matrix-org/synapse/issues/16457))
+- Fix a bug introduced in Synapse 1.59.0 where servers could be incorrectly marked as available after an error response was received. ([\#16506](https://github.com/matrix-org/synapse/issues/16506))
+
+### Improved Documentation
+
+- Document the internal background update mechanism. ([\#16420](https://github.com/matrix-org/synapse/issues/16420))
+- Fix a typo in the SQL in the [useful SQL for admins](https://matrix-org.github.io/synapse/latest/usage/administration/useful_sql_for_admins.html) document. ([\#16477](https://github.com/matrix-org/synapse/issues/16477))
+
+### Internal Changes
+
+- Bump pyo3 from 0.17.1 to 0.19.2. ([\#16162](https://github.com/matrix-org/synapse/issues/16162))
+- Update registration of media repository URLs. ([\#16419](https://github.com/matrix-org/synapse/issues/16419))
+- Improve type hints. ([\#16421](https://github.com/matrix-org/synapse/issues/16421), [\#16468](https://github.com/matrix-org/synapse/issues/16468), [\#16469](https://github.com/matrix-org/synapse/issues/16469), [\#16507](https://github.com/matrix-org/synapse/issues/16507))
+- Refactor some receipts-stream-adjacent code to simplify it and improve its typing. ([\#16426](https://github.com/matrix-org/synapse/issues/16426))
+- Factor out `MultiWriter` token from `RoomStreamToken`. ([\#16427](https://github.com/matrix-org/synapse/issues/16427))
+- Improve code comments. ([\#16428](https://github.com/matrix-org/synapse/issues/16428))
+- Reduce memory allocations. ([\#16429](https://github.com/matrix-org/synapse/issues/16429), [\#16431](https://github.com/matrix-org/synapse/issues/16431), [\#16433](https://github.com/matrix-org/synapse/issues/16433), [\#16434](https://github.com/matrix-org/synapse/issues/16434), [\#16438](https://github.com/matrix-org/synapse/issues/16438), [\#16444](https://github.com/matrix-org/synapse/issues/16444))
+- Remove unused method. ([\#16435](https://github.com/matrix-org/synapse/issues/16435))
+- Improve rate limiting logic. ([\#16441](https://github.com/matrix-org/synapse/issues/16441))
+- Do not block running of CI behind the check for sign-off on PRs. ([\#16454](https://github.com/matrix-org/synapse/issues/16454))
+- Update the release script to remind the releaser to check for special release notes. ([\#16461](https://github.com/matrix-org/synapse/issues/16461))
+- Update complement.sh to match the new public API shape. ([\#16466](https://github.com/matrix-org/synapse/issues/16466))
+- Clean up logging on event persister endpoints. ([\#16488](https://github.com/matrix-org/synapse/issues/16488))
+- Remove a useless async job to delete device messages on sync, since we only deliver (and hence delete) up to 100 device messages at a time. ([\#16491](https://github.com/matrix-org/synapse/issues/16491))
+
+### Updates to locked dependencies
+
+* Bump bleach from 6.0.0 to 6.1.0. ([\#16451](https://github.com/matrix-org/synapse/issues/16451))
+* Bump jsonschema from 4.19.0 to 4.19.1. ([\#16500](https://github.com/matrix-org/synapse/issues/16500))
+* Bump netaddr from 0.8.0 to 0.9.0. ([\#16453](https://github.com/matrix-org/synapse/issues/16453))
+* Bump packaging from 23.1 to 23.2. ([\#16497](https://github.com/matrix-org/synapse/issues/16497))
+* Bump pillow from 10.0.1 to 10.1.0. ([\#16498](https://github.com/matrix-org/synapse/issues/16498))
+* Bump psycopg2 from 2.9.8 to 2.9.9. ([\#16452](https://github.com/matrix-org/synapse/issues/16452))
+* Bump pyo3-log from 0.8.3 to 0.8.4. ([\#16495](https://github.com/matrix-org/synapse/issues/16495))
+* Bump ruff from 0.0.290 to 0.0.292. ([\#16449](https://github.com/matrix-org/synapse/issues/16449))
+* Bump sentry-sdk from 1.31.0 to 1.32.0. ([\#16496](https://github.com/matrix-org/synapse/issues/16496))
+* Bump serde from 1.0.188 to 1.0.189. ([\#16494](https://github.com/matrix-org/synapse/issues/16494))
+* Bump types-bleach from 6.0.0.4 to 6.1.0.0. ([\#16450](https://github.com/matrix-org/synapse/issues/16450))
+* Bump types-jsonschema from 4.17.0.10 to 4.19.0.3. ([\#16499](https://github.com/matrix-org/synapse/issues/16499))
+
+
+# Synapse 1.94.0 (2023-10-10)
+
+No significant changes since 1.94.0rc1.
+However, please take note of the security advisory that follows.
+
+## Security advisory
+
+The following issue is fixed in 1.94.0 (and RC).
+
+- [GHSA-5chr-wjw5-3gq4](https://github.com/matrix-org/synapse/security/advisories/GHSA-5chr-wjw5-3gq4) / [CVE-2023-45129](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-45129) — Moderate Severity
+
+  A malicious server ACL event can impact performance temporarily or permanently, leading to a persistent denial of service.
+
+  Homeservers running on a closed federation (which presumably do not need to use server ACLs) are not affected.
+
+See the advisory for more details. If you have any questions, email security@matrix.org.
+
+
+# Synapse 1.94.0rc1 (2023-10-03)
+
+### Features
+
+- Render plain text, CSS, CSV, JSON and common image formats in the browser (inline) when requested through the `/download` endpoint. ([\#15988](https://github.com/matrix-org/synapse/issues/15988))
+- Add experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) to push all encrypted events to clients. ([\#16361](https://github.com/matrix-org/synapse/issues/16361))
+- Minor performance improvement when sending presence to federated servers. ([\#16385](https://github.com/matrix-org/synapse/issues/16385))
+- Minor performance improvement by caching server ACL checking. ([\#16360](https://github.com/matrix-org/synapse/issues/16360))
+
+### Improved Documentation
+
+- Add developer documentation concerning gradual schema migrations with column alterations. ([\#15691](https://github.com/matrix-org/synapse/issues/15691))
+- Improve documentation of the user directory search algorithm. ([\#16320](https://github.com/matrix-org/synapse/issues/16320))
+- Fix rendering of user admin API documentation around deactivation. This was broken in Synapse 1.91.0. ([\#16355](https://github.com/matrix-org/synapse/issues/16355))
+- Update documentation around message retention policies. ([\#16382](https://github.com/matrix-org/synapse/issues/16382))
+- Add a note to the `federation_domain_whitelist` config option to clarify its usage. ([\#16416](https://github.com/matrix-org/synapse/issues/16416))
+- Improve legacy release notes. ([\#16418](https://github.com/matrix-org/synapse/issues/16418))
+
+### Deprecations and Removals
+
+- Remove the Python version from `/_synapse/admin/v1/server_version`. ([\#16380](https://github.com/matrix-org/synapse/issues/16380))
+
+### Internal Changes
+
+- Avoid running CI steps when the files they check have not been changed. ([\#14745](https://github.com/matrix-org/synapse/issues/14745), [\#16387](https://github.com/matrix-org/synapse/issues/16387))
+- Improve type hints. ([\#14911](https://github.com/matrix-org/synapse/issues/14911), [\#16350](https://github.com/matrix-org/synapse/issues/16350), [\#16356](https://github.com/matrix-org/synapse/issues/16356), [\#16395](https://github.com/matrix-org/synapse/issues/16395))
+- Add support for pydantic v2 in addition to pydantic v1. Contributed by Maxwell G (@gotmax23). ([\#16332](https://github.com/matrix-org/synapse/issues/16332))
+- Get CI to check that PRs have been signed off. ([\#16348](https://github.com/matrix-org/synapse/issues/16348))
+- Add missing licence header. ([\#16359](https://github.com/matrix-org/synapse/issues/16359))
+- Improve type hints, and bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381))
+- Improve comments in `StateGroupBackgroundUpdateStore`. ([\#16383](https://github.com/matrix-org/synapse/issues/16383))
+- Update maturin configuration. ([\#16394](https://github.com/matrix-org/synapse/issues/16394))
+- Downgrade replication stream timeout error log lines to warning. ([\#16401](https://github.com/matrix-org/synapse/issues/16401))
+
+### Updates to locked dependencies
+
+* Bump actions/checkout from 3 to 4. ([\#16250](https://github.com/matrix-org/synapse/issues/16250))
+* Bump cryptography from 41.0.3 to 41.0.4. ([\#16362](https://github.com/matrix-org/synapse/issues/16362))
+* Bump dawidd6/action-download-artifact from 2.27.0 to 2.28.0. ([\#16374](https://github.com/matrix-org/synapse/issues/16374))
+* Bump docker/setup-buildx-action from 2 to 3. ([\#16375](https://github.com/matrix-org/synapse/issues/16375))
+* Bump gitpython from 3.1.35 to 3.1.37. ([\#16376](https://github.com/matrix-org/synapse/issues/16376))
+* Bump msgpack from 1.0.5 to 1.0.6. ([\#16377](https://github.com/matrix-org/synapse/issues/16377))
+* Bump msgpack from 1.0.6 to 1.0.7. ([\#16412](https://github.com/matrix-org/synapse/issues/16412))
+* Bump phonenumbers from 8.13.19 to 8.13.22. ([\#16413](https://github.com/matrix-org/synapse/issues/16413))
+* Bump psycopg2 from 2.9.7 to 2.9.8. ([\#16409](https://github.com/matrix-org/synapse/issues/16409))
+* Bump pydantic from 2.3.0 to 2.4.2. ([\#16410](https://github.com/matrix-org/synapse/issues/16410))
+* Bump regex from 1.9.5 to 1.9.6. ([\#16408](https://github.com/matrix-org/synapse/issues/16408))
+* Bump sentry-sdk from 1.30.0 to 1.31.0. ([\#16378](https://github.com/matrix-org/synapse/issues/16378))
+* Bump types-netaddr from 0.8.0.9 to 0.9.0.1. ([\#16411](https://github.com/matrix-org/synapse/issues/16411))
+* Bump types-psycopg2 from 2.9.21.11 to 2.9.21.14. ([\#16381](https://github.com/matrix-org/synapse/issues/16381))
+* Bump urllib3 from 1.26.15 to 1.26.17. ([\#16422](https://github.com/matrix-org/synapse/issues/16422))
+
+
+# Synapse 1.93.0 (2023-09-26)
+
+No significant changes since 1.93.0rc1.
+
+
+## Security advisory
+
+The following issues are fixed in 1.93.0 (and RCs).
+
+- [GHSA-4f74-84v3-j9q5](https://github.com/matrix-org/synapse/security/advisories/GHSA-4f74-84v3-j9q5) / [CVE-2023-41335](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-41335) — Low Severity
+
+  Temporary storage of plaintext passwords during password changes.
+
+- [GHSA-7565-cq32-vx2x](https://github.com/matrix-org/synapse/security/advisories/GHSA-7565-cq32-vx2x) / [CVE-2023-42453](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-42453) — Low Severity
+
+  Improper validation of receipts allows forged read receipts.
+
+See the advisories for more details. If you have any questions, email security@matrix.org.
+
+
+# Synapse 1.93.0rc1 (2023-09-19)
+
+### Features
+
+- Add automatic purge after all users have forgotten a room. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Restore room purge/shutdown after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
+- Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040). ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
+- Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes; see the sketch after this list. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
+- Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
+- Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
+- Allow the `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
+- Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
+- Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))
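+
+A self-contained sketch of the documented suffix semantics (binary multiples, so `G` is GiB and `T` is TiB); this illustrates the meaning only and is not Synapse's actual parser:
+
+```python
+# Binary multipliers matching the documented suffix meanings.
+MULTIPLIERS = {"K": 1024, "M": 1024**2, "G": 1024**3, "T": 1024**4}
+
+def parse_size(value: str) -> int:
+    """Interpret a config size such as "100M" or "2G" as a number of bytes."""
+    suffix = value[-1].upper()
+    if suffix in MULTIPLIERS:
+        return int(value[:-1]) * MULTIPLIERS[suffix]
+    return int(value)
+
+assert parse_size("2G") == 2 * 1024**3  # 2 GiB
+assert parse_size("1T") == 1024**4      # 1 TiB
+```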
+
+### Bugfixes
+
+- Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174))
+- Fix a long-standing bug where appservices using [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) to receive `to_device` messages would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
+- Fix a bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. ([\#16252](https://github.com/matrix-org/synapse/issues/16252))
+- Fix a long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257))
+- Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272))
+- Fix a bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288))
+- Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327))
+- Use the standard name for the UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329))
+- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
+
+### Improved Documentation
+
+- Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282))
+- Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304))
+- Use a string for the `federation_client_minimum_tls_version` documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))
+
+### Internal Changes
+
+- Allow modules to delete rooms. ([\#15997](https://github.com/matrix-org/synapse/issues/15997))
+- Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. ([\#16090](https://github.com/matrix-org/synapse/issues/16090), [\#16263](https://github.com/matrix-org/synapse/issues/16263))
+- Fix type checking when using the new version of Twisted. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
+- Delete device messages asynchronously and in staged batches using the task scheduler. ([\#16240](https://github.com/matrix-org/synapse/issues/16240), [\#16311](https://github.com/matrix-org/synapse/issues/16311), [\#16312](https://github.com/matrix-org/synapse/issues/16312), [\#16313](https://github.com/matrix-org/synapse/issues/16313))
+- Bump minimum supported Rust version to 1.61.0. ([\#16248](https://github.com/matrix-org/synapse/issues/16248))
+- Update Rust to version 1.71.1 in the Nix development environment. ([\#16260](https://github.com/matrix-org/synapse/issues/16260))
+- Simplify server key storage. ([\#16261](https://github.com/matrix-org/synapse/issues/16261))
+- Reduce CPU overhead of the change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264))
+- Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273))
+- Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326))
+- Raise `setuptools_rust` version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
+- Fix the new task scheduler using large amounts of CPU. ([\#16278](https://github.com/matrix-org/synapse/issues/16278))
+- Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
+- Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
+- Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
+- Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
+- Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
+- Remove a reference cycle in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
+- Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
+- Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
+- Speed up the task that deletes to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))
+- Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349))
+- Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351))
+
+### Updates to locked dependencies
+
+* Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
+* Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295))
+* Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336))
+* Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339))
+* Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337))
+* Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338))
+* Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340))
+* Bump gitpython from 3.1.32 to 3.1.35. ([\#16267](https://github.com/matrix-org/synapse/issues/16267), [\#16279](https://github.com/matrix-org/synapse/issues/16279))
+* Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291))
+* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
+* Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233))
+* Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342))
+* Bump serde_json from 1.0.105 to 1.0.107. ([\#16296](https://github.com/matrix-org/synapse/issues/16296), [\#16345](https://github.com/matrix-org/synapse/issues/16345))
+* Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
+* Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293))
+* Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292))
+* Bump typing-extensions from 4.7.1 to 4.8.0. ([\#16341](https://github.com/matrix-org/synapse/issues/16341))
+
+
+# Synapse 1.92.3 (2023-09-18)
+
+This is again a security update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863).
+It turns out that libwebp is bundled statically in Pillow wheels, so we need to update this dependency rather than the
+libwebp package at the OS level.
+
+Unlike what was advertised in the 1.92.2 changelog, this release also impacts PyPI wheels and Debian packages from matrix.org.
+
+We encourage admins to upgrade as soon as possible.
+
+
+### Internal Changes
+
+- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in its wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347))
+
+### Updates to locked dependencies
+
+* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
+
+
+# Synapse 1.92.2 (2023-09-15)
+
+This is a Docker-only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libwebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed). We encourage admins to upgrade as soon as possible.
+
+
+### Updates to the Docker image
+
+- Update the Docker image to use Debian bookworm as the base. ([\#16324](https://github.com/matrix-org/synapse/issues/16324))
+
+
+# Synapse 1.92.1 (2023-09-12)
+
+This minor release was needed only because of CI-related trouble on [v1.92.0](https://github.com/matrix-org/synapse/releases/tag/v1.92.0), which was never released.
+
+### Internal Changes
+
+- Stop building Ubuntu Kinetic since it is EOL and its repositories appear to be dead.
+
+
+# Synapse 1.92.0 (2023-09-12)
+
+This release includes the same [bugfix](https://github.com/matrix-org/synapse/issues/16258) as Synapse 1.91.2.
+
+This version was never released following a CI build failure; see the [v1.92.1 changelog](https://github.com/matrix-org/synapse/releases/tag/v1.92.1).
+
+### Bugfixes
+
+- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
+
+### Internal Changes
+
+- Fix incorrect docstring for `Ratelimiter`. ([\#16255](https://github.com/matrix-org/synapse/issues/16255))
+- Update the release script to work on macOS. ([\#16266](https://github.com/matrix-org/synapse/issues/16266))
+
+
+# Synapse 1.91.2 (2023-09-06)
+
+### Bugfixes
+
+- Revert [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) introspection cache, admin impersonation and account lock. ([\#16258](https://github.com/matrix-org/synapse/issues/16258))
+
+
+# Synapse 1.92.0rc1 (2023-09-05)
+
+### Features
+
+- Add a configuration setting for the CAS protocol version. Contributed by Aurélien Grimpard. ([\#15816](https://github.com/matrix-org/synapse/issues/15816))
+- Suppress notifications from message edits per [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958). ([\#16113](https://github.com/matrix-org/synapse/issues/16113))
+- Experimental support for [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041): return a `Retry-After` header with `M_LIMIT_EXCEEDED` error responses; see the sketch after this list. ([\#16136](https://github.com/matrix-org/synapse/issues/16136))
+- Add `last_seen_ts` to the [admin users API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#16218](https://github.com/matrix-org/synapse/issues/16218))
+- Improve resource usage when sending data to a large number of remote hosts that are marked as "down". ([\#16223](https://github.com/matrix-org/synapse/issues/16223))
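+
+A minimal client-side sketch of honouring that header (illustrative only; the URL passed in is a placeholder):
+
+```python
+import time
+
+import requests  # any HTTP client works; requests is used for brevity
+
+def post_with_backoff(url: str, **kwargs) -> requests.Response:
+    """POST, retrying on 429 and honouring the Retry-After header."""
+    while True:
+        resp = requests.post(url, **kwargs)
+        if resp.status_code != 429:
+            return resp
+        # MSC4041: M_LIMIT_EXCEEDED responses may carry Retry-After
+        # (whole seconds), complementing the retry_after_ms JSON field.
+        time.sleep(int(resp.headers.get("Retry-After", "1")))
+```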
+
+### Bugfixes
+
+- Fix IPv6-related bugs in SMTP settings, adding groundwork to fix similar issues. Contributed by @evilham and @telmich (ungleich.ch). ([\#16155](https://github.com/matrix-org/synapse/issues/16155))
+- Fix a spec compliance issue where requests to the `/publicRooms` federation API would specify `include_all_networks` as a string. ([\#16185](https://github.com/matrix-org/synapse/issues/16185))
+- Fix an inaccurate error message while attempting to ban or unban a user with the same or higher power level, by splitting the conditional statements. Contributed by @leviosacz. ([\#16205](https://github.com/matrix-org/synapse/issues/16205))
+- Fix a rare bug that broke looping calls, which could lead to e.g. linearly increasing memory usage. Introduced in v1.90.0. ([\#16210](https://github.com/matrix-org/synapse/issues/16210))
+- Fix a long-standing bug where uploading images would fail if we could not generate thumbnails for them. ([\#16211](https://github.com/matrix-org/synapse/issues/16211))
+- Fix a long-standing bug where we did not correctly back off from servers that had "gone" if they returned 4xx series error codes. ([\#16221](https://github.com/matrix-org/synapse/issues/16221))
+
+### Improved Documentation
+
+- Update links to the [matrix.org blog](https://matrix.org/blog/). ([\#16008](https://github.com/matrix-org/synapse/issues/16008))
+- Document which [admin APIs](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) are disabled when experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support is enabled. ([\#16168](https://github.com/matrix-org/synapse/issues/16168))
+- Document the [`exclude_rooms_from_sync`](https://matrix-org.github.io/synapse/v1.92/usage/configuration/config_documentation.html#exclude_rooms_from_sync) configuration option. ([\#16178](https://github.com/matrix-org/synapse/issues/16178))
+
+### Internal Changes
+
+- Prepare unit tests for Python 3.12. ([\#16099](https://github.com/matrix-org/synapse/issues/16099))
+- Fix nightly CI jobs. ([\#16121](https://github.com/matrix-org/synapse/issues/16121), [\#16213](https://github.com/matrix-org/synapse/issues/16213))
+- Describe which rate limiter was hit in logs. ([\#16135](https://github.com/matrix-org/synapse/issues/16135))
+- Simplify presence code when using workers. ([\#16170](https://github.com/matrix-org/synapse/issues/16170))
+- Track per-device information in the presence code. ([\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172))
+- Stop using the `event_txn_id` table. ([\#16175](https://github.com/matrix-org/synapse/issues/16175))
+- Use `AsyncMock` instead of custom code. ([\#16179](https://github.com/matrix-org/synapse/issues/16179), [\#16180](https://github.com/matrix-org/synapse/issues/16180))
+- Improve error reporting of invalid data passed to `/_matrix/key/v2/query`. ([\#16183](https://github.com/matrix-org/synapse/issues/16183))
+- Task scheduler: add replication notify for new task to launch ASAP. ([\#16184](https://github.com/matrix-org/synapse/issues/16184))
+- Improve type hints. ([\#16186](https://github.com/matrix-org/synapse/issues/16186), [\#16188](https://github.com/matrix-org/synapse/issues/16188), [\#16201](https://github.com/matrix-org/synapse/issues/16201))
+- Bump black version to 23.7.0. ([\#16187](https://github.com/matrix-org/synapse/issues/16187))
+- Log the details of background update failures. ([\#16212](https://github.com/matrix-org/synapse/issues/16212))
+- Cache device resync requests over replication. ([\#16241](https://github.com/matrix-org/synapse/issues/16241))
+
+### Updates to locked dependencies
+
+* Bump anyhow from 1.0.72 to 1.0.75. ([\#16141](https://github.com/matrix-org/synapse/issues/16141))
+* Bump furo from 2023.7.26 to 2023.8.19. ([\#16238](https://github.com/matrix-org/synapse/issues/16238))
+* Bump phonenumbers from 8.13.18 to 8.13.19. ([\#16237](https://github.com/matrix-org/synapse/issues/16237))
+* Bump psycopg2 from 2.9.6 to 2.9.7. ([\#16196](https://github.com/matrix-org/synapse/issues/16196))
+* Bump regex from 1.9.3 to 1.9.4. ([\#16195](https://github.com/matrix-org/synapse/issues/16195))
+* Bump ruff from 0.0.277 to 0.0.286. ([\#16198](https://github.com/matrix-org/synapse/issues/16198))
+* Bump sentry-sdk from 1.29.2 to 1.30.0. ([\#16236](https://github.com/matrix-org/synapse/issues/16236))
+* Bump serde from 1.0.184 to 1.0.188. ([\#16194](https://github.com/matrix-org/synapse/issues/16194))
+* Bump serde_json from 1.0.104 to 1.0.105. ([\#16140](https://github.com/matrix-org/synapse/issues/16140))
+* Bump types-psycopg2 from 2.9.21.10 to 2.9.21.11. ([\#16200](https://github.com/matrix-org/synapse/issues/16200))
+* Bump types-pyyaml from 6.0.12.10 to 6.0.12.11. ([\#16199](https://github.com/matrix-org/synapse/issues/16199))
+
+
+# Synapse 1.91.1 (2023-09-04)
+
+### Bugfixes
+
+- Fix a performance regression introduced in Synapse 1.91.0 where event persistence would cause an excessive linear growth in CPU usage. ([\#16220](https://github.com/matrix-org/synapse/issues/16220))
+
+
+# Synapse 1.91.0 (2023-08-30)
+
+No significant changes since 1.91.0rc1.
+
+
+# Synapse 1.91.0rc1 (2023-08-23)
+
+### Features
+
+- Implement an admin API to lock a user without deactivating them (see the sketch after this list). Based on [MSC3939](https://github.com/matrix-org/matrix-spec-proposals/pull/3939). ([\#15870](https://github.com/matrix-org/synapse/issues/15870))
+- Implement a task scheduler for resumable, potentially long-running tasks. ([\#15891](https://github.com/matrix-org/synapse/issues/15891))
+- Allow specifying `client_secret_path` as an alternative to `client_secret` for OIDC providers. This avoids leaking the client secret in the homeserver config. Contributed by @Ma27. ([\#16030](https://github.com/matrix-org/synapse/issues/16030))
+- Allow customising the IdP display name, icon, and brand for SAML and CAS providers (in addition to OIDC providers). ([\#16094](https://github.com/matrix-org/synapse/issues/16094))
+- Add an `admins` query parameter to the [List Accounts](https://matrix-org.github.io/synapse/v1.91/admin_api/user_admin_api.html#list-accounts) [admin API](https://matrix-org.github.io/synapse/v1.91/usage/administration/admin_api/index.html), to include only admins or to exclude admins in user queries. ([\#16114](https://github.com/matrix-org/synapse/issues/16114))
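+
+A minimal sketch of locking a user through the user admin API (homeserver URL and admin token are placeholders):
+
+```python
+import requests  # any HTTP client works; requests is used for brevity
+
+BASE = "https://homeserver.example.com"  # placeholder
+HEADERS = {"Authorization": "Bearer <admin access token>"}  # placeholder
+
+# Setting "locked" rejects the user's requests without deactivating
+# the account; setting it back to False unlocks them again.
+resp = requests.put(
+    f"{BASE}/_synapse/admin/v2/users/@alice:example.com",
+    headers=HEADERS,
+    json={"locked": True},
+)
+resp.raise_for_status()
+```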
+
+### Bugfixes
+
+- Fix a long-standing bug where concurrent requests to change a user's push rules could cause a deadlock. Contributed by Nick @ Beeper (@fizzadar). ([\#16052](https://github.com/matrix-org/synapse/issues/16052))
+- Fix a long-standing bug in `/sync` where `timeout=0` does not skip caching, resulting in slow calls in cases where there are no new changes. Contributed by @PlasmaIntec. ([\#16080](https://github.com/matrix-org/synapse/issues/16080))
+- Fix performance of state resolutions for large, old rooms that did not have the full auth chain persisted. ([\#16116](https://github.com/matrix-org/synapse/issues/16116))
+- Filter out user agent references to the sliding sync proxy and rust-sdk from the `user_daily_visits` table to ensure that Element X can be represented fully. ([\#16124](https://github.com/matrix-org/synapse/issues/16124))
+- User consent and the third-party ID changes capability cannot be enabled when using experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support. ([\#16127](https://github.com/matrix-org/synapse/issues/16127), [\#16134](https://github.com/matrix-org/synapse/issues/16134))
+- Fix a rare race that could block new events from being sent for up to two minutes. Introduced in v1.90.0. ([\#16133](https://github.com/matrix-org/synapse/issues/16133), [\#16169](https://github.com/matrix-org/synapse/issues/16169))
+- Fix performance degradation when there are a lot of in-flight replication requests. ([\#16148](https://github.com/matrix-org/synapse/issues/16148))
+- Fix a bug introduced in Synapse 1.87 where Synapse would send an excessive amount of federation requests to servers which have been offline for a long time. Contributed by Nico. ([\#16156](https://github.com/matrix-org/synapse/issues/16156), [\#16164](https://github.com/matrix-org/synapse/issues/16164))
+
+### Improved Documentation
+
+- Structured logging docs: add a link to explain the ELK stack. ([\#16091](https://github.com/matrix-org/synapse/issues/16091))
+
+### Internal Changes
+
+- Update dehydrated devices implementation. ([\#16010](https://github.com/matrix-org/synapse/issues/16010))
+- Fix database performance of read/write worker locks. ([\#16061](https://github.com/matrix-org/synapse/issues/16061))
+- Fix building the Nix development environment on macOS systems. ([\#16063](https://github.com/matrix-org/synapse/issues/16063))
+- Override global statement timeout when creating indexes in Postgres. ([\#16085](https://github.com/matrix-org/synapse/issues/16085))
+- Fix the type annotation on `run_db_interaction` in the Module API. ([\#16089](https://github.com/matrix-org/synapse/issues/16089))
+- Clean-up the presence code. ([\#16092](https://github.com/matrix-org/synapse/issues/16092))
+- Run `pyupgrade` for Python 3.8+. ([\#16110](https://github.com/matrix-org/synapse/issues/16110))
+- Rename pagination and purge locks and add comments to explain why they exist and how they work. ([\#16112](https://github.com/matrix-org/synapse/issues/16112))
+- Attempt to fix the Twisted trunk job. ([\#16115](https://github.com/matrix-org/synapse/issues/16115))
+- Cache the token introspection response from the OIDC provider. ([\#16117](https://github.com/matrix-org/synapse/issues/16117))
+- Add cache to `get_server_keys_json_for_remote`. ([\#16123](https://github.com/matrix-org/synapse/issues/16123))
+- Add an admin endpoint to allow the authorizing server to signal token revocations. ([\#16125](https://github.com/matrix-org/synapse/issues/16125))
+- Add response time metrics for introspection requests for delegated auth. ([\#16131](https://github.com/matrix-org/synapse/issues/16131))
+- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow impersonation by an admin user using the `_oidc_admin_impersonate_user_id` query parameter. ([\#16132](https://github.com/matrix-org/synapse/issues/16132))
+- Increase performance of read/write locks. ([\#16149](https://github.com/matrix-org/synapse/issues/16149))
+- Improve presence tests. ([\#16150](https://github.com/matrix-org/synapse/issues/16150), [\#16151](https://github.com/matrix-org/synapse/issues/16151), [\#16158](https://github.com/matrix-org/synapse/issues/16158))
+- Raise the poetry-core version cap to 1.7.0. ([\#16152](https://github.com/matrix-org/synapse/issues/16152))
+- Fix assertion in user directory unit tests. ([\#16157](https://github.com/matrix-org/synapse/issues/16157))
+- Reduce scope of locks when paginating to alleviate DB contention. ([\#16159](https://github.com/matrix-org/synapse/issues/16159))
+- Reduce DB contention on worker locks. ([\#16160](https://github.com/matrix-org/synapse/issues/16160))
+- Task scheduler: mark a task as active if we are scheduling it as soon as possible. ([\#16165](https://github.com/matrix-org/synapse/issues/16165))
+
+### Updates to locked dependencies
+
+* Bump click from 8.1.6 to 8.1.7. ([\#16145](https://github.com/matrix-org/synapse/issues/16145))
+* Bump gitpython from 3.1.31 to 3.1.32. ([\#16103](https://github.com/matrix-org/synapse/issues/16103))
+* Bump ijson from 3.2.1 to 3.2.3. ([\#16143](https://github.com/matrix-org/synapse/issues/16143))
+* Bump isort from 5.11.5 to 5.12.0. ([\#16108](https://github.com/matrix-org/synapse/issues/16108))
+* Bump log from 0.4.19 to 0.4.20. ([\#16109](https://github.com/matrix-org/synapse/issues/16109))
+* Bump pygithub from 1.59.0 to 1.59.1. ([\#16144](https://github.com/matrix-org/synapse/issues/16144))
+* Bump sentry-sdk from 1.28.1 to 1.29.2. ([\#16142](https://github.com/matrix-org/synapse/issues/16142))
+* Bump serde from 1.0.183 to 1.0.184. ([\#16139](https://github.com/matrix-org/synapse/issues/16139))
+* Bump txredisapi from 1.4.9 to 1.4.10. ([\#16107](https://github.com/matrix-org/synapse/issues/16107))
+* Bump types-bleach from 6.0.0.3 to 6.0.0.4. ([\#16106](https://github.com/matrix-org/synapse/issues/16106))
+* Bump types-pillow from 10.0.0.1 to 10.0.0.2. ([\#16105](https://github.com/matrix-org/synapse/issues/16105))
+* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146))
([\#16105](https://github.com/matrix-org/synapse/issues/16105)) +* Bump types-pyopenssl from 23.2.0.1 to 23.2.0.2. ([\#16146](https://github.com/matrix-org/synapse/issues/16146)) + +# Synapse 1.90.0 (2023-08-15) + +No significant changes since 1.90.0rc1. + + +# Synapse 1.90.0rc1 (2023-08-08) + +### Features + +- Scope transaction IDs to devices (implement [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970)). ([\#15629](https://github.com/matrix-org/synapse/issues/15629)) +- Remove old rows from the `cache_invalidation_stream_by_instance` table automatically (this table is unused in SQLite). ([\#15868](https://github.com/matrix-org/synapse/issues/15868)) + +### Bugfixes + +- Fix a long-standing bug where purging history and paginating simultaneously could lead to database corruption when using workers. ([\#15791](https://github.com/matrix-org/synapse/issues/15791)) +- Fix a long-standing bug where the profile endpoint returned a 404 when the user's display name was empty. ([\#16012](https://github.com/matrix-org/synapse/issues/16012)) +- Fix a long-standing bug where `synapse_port_db` failed to configure sequences for application services and partial stated rooms. ([\#16043](https://github.com/matrix-org/synapse/issues/16043)) +- Fix long-standing bug with deletion in dehydrated devices v2. ([\#16046](https://github.com/matrix-org/synapse/issues/16046)) + +### Updates to the Docker image + +- Add `org.opencontainers.image.version` labels to Docker containers [published by Matrix.org](https://hub.docker.com/r/matrixdotorg/synapse). Contributed by Mo Balaa. ([\#15972](https://github.com/matrix-org/synapse/issues/15972), [\#16009](https://github.com/matrix-org/synapse/issues/16009)) + +### Improved Documentation + +- Add an internal documentation page describing the ["streams" used within Synapse](https://matrix-org.github.io/synapse/v1.90/development/synapse_architecture/streams.html). ([\#16015](https://github.com/matrix-org/synapse/issues/16015)) +- Clarify the comment on the keys/upload over replication endpoint. ([\#16016](https://github.com/matrix-org/synapse/issues/16016)) +- Do not expose the Admin API in the Caddy reverse proxy example. Contributed by @NilsIrl. ([\#16027](https://github.com/matrix-org/synapse/issues/16027)) + +### Deprecations and Removals + +- Remove support for legacy application service paths. ([\#15964](https://github.com/matrix-org/synapse/issues/15964)) +- Move support for application service query parameter authorization behind a configuration option. (See the sketch after this list.) ([\#16017](https://github.com/matrix-org/synapse/issues/16017))
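
For deployments that still need the deprecated query-parameter behaviour during a migration window, the change above gates it behind a homeserver option. A minimal sketch, assuming (as this editor reads the linked change) the option is named `use_appservice_legacy_authorization`:

```yaml
# Opt back in to sending the application service access token as an
# `access_token` query parameter. This is deprecated and insecure;
# the assumed default is false.
use_appservice_legacy_authorization: true
```
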
+ +### Internal Changes + +- Update SQL queries to inline boolean parameters as supported in SQLite 3.27. ([\#15525](https://github.com/matrix-org/synapse/issues/15525)) +- Allow for the configuration of the backoff algorithm for federation destinations. ([\#15754](https://github.com/matrix-org/synapse/issues/15754)) +- Allow modules to check whether the current worker is configured to run background tasks. ([\#15991](https://github.com/matrix-org/synapse/issues/15991)) +- Update support for [MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958) to match the latest revision of the MSC. ([\#15992](https://github.com/matrix-org/synapse/issues/15992)) +- Allow modules to schedule delayed background calls. ([\#15993](https://github.com/matrix-org/synapse/issues/15993)) +- Properly overwrite the `redacts` content-property for forwards-compatibility with room versions 1 through 10. ([\#16013](https://github.com/matrix-org/synapse/issues/16013)) +- Fix building the nix development environment on macOS systems. ([\#16019](https://github.com/matrix-org/synapse/issues/16019)) +- Remove leading and trailing spaces when setting a display name. ([\#16031](https://github.com/matrix-org/synapse/issues/16031)) +- Combine duplicated code. ([\#16023](https://github.com/matrix-org/synapse/issues/16023)) +- Collect additional metrics from `ResponseCache` for eviction. ([\#16028](https://github.com/matrix-org/synapse/issues/16028)) +- Fix endpoint improperly declaring support for MSC3814. ([\#16068](https://github.com/matrix-org/synapse/issues/16068)) +- Drop backwards compat hack for event serialization. ([\#16069](https://github.com/matrix-org/synapse/issues/16069)) + +### Updates to locked dependencies + +* Update PyYAML to 6.0.1. ([\#16011](https://github.com/matrix-org/synapse/issues/16011)) +* Bump cryptography from 41.0.2 to 41.0.3. ([\#16048](https://github.com/matrix-org/synapse/issues/16048)) +* Bump furo from 2023.5.20 to 2023.7.26. ([\#16077](https://github.com/matrix-org/synapse/issues/16077)) +* Bump immutabledict from 2.2.4 to 3.0.0. ([\#16034](https://github.com/matrix-org/synapse/issues/16034)) +* Update certifi to 2023.7.22 and pygments to 2.15.1. ([\#16044](https://github.com/matrix-org/synapse/issues/16044)) +* Bump jsonschema from 4.18.3 to 4.19.0. ([\#16081](https://github.com/matrix-org/synapse/issues/16081)) +* Bump phonenumbers from 8.13.14 to 8.13.18. ([\#16076](https://github.com/matrix-org/synapse/issues/16076)) +* Bump regex from 1.9.1 to 1.9.3. ([\#16073](https://github.com/matrix-org/synapse/issues/16073)) +* Bump serde from 1.0.171 to 1.0.175. ([\#15982](https://github.com/matrix-org/synapse/issues/15982)) +* Bump serde from 1.0.175 to 1.0.179. ([\#16033](https://github.com/matrix-org/synapse/issues/16033)) +* Bump serde from 1.0.179 to 1.0.183. ([\#16074](https://github.com/matrix-org/synapse/issues/16074)) +* Bump serde_json from 1.0.103 to 1.0.104. ([\#16032](https://github.com/matrix-org/synapse/issues/16032)) +* Bump service-identity from 21.1.0 to 23.1.0. ([\#16038](https://github.com/matrix-org/synapse/issues/16038)) +* Bump types-commonmark from 0.9.2.3 to 0.9.2.4. ([\#16037](https://github.com/matrix-org/synapse/issues/16037)) +* Bump types-jsonschema from 4.17.0.8 to 4.17.0.10. ([\#16036](https://github.com/matrix-org/synapse/issues/16036)) +* Bump types-netaddr from 0.8.0.8 to 0.8.0.9. ([\#16035](https://github.com/matrix-org/synapse/issues/16035)) +* Bump types-opentracing from 2.4.10.5 to 2.4.10.6. ([\#16078](https://github.com/matrix-org/synapse/issues/16078)) +* Bump types-setuptools from 68.0.0.0 to 68.0.0.3. ([\#16079](https://github.com/matrix-org/synapse/issues/16079)) + +# Synapse 1.89.0 (2023-08-01) + +No significant changes since 1.89.0rc1. + + +# Synapse 1.89.0rc1 (2023-07-25) + +### Features + +- Add Unix socket support for HTTP replication listeners. [Document and provide usage instructions](https://matrix-org.github.io/synapse/v1.89/usage/configuration/config_documentation.html#listeners) for utilizing Unix sockets in Synapse. Contributed by Jason Little. (See the config sketch after this list.) ([\#15708](https://github.com/matrix-org/synapse/issues/15708), [\#15924](https://github.com/matrix-org/synapse/issues/15924)) +- Allow `+` in Matrix IDs, per [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009).
([\#15911](https://github.com/matrix-org/synapse/issues/15911)) +- Support room version 11 from [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820). ([\#15912](https://github.com/matrix-org/synapse/issues/15912)) +- Allow configuring the set of workers to proxy outbound federation traffic through via `outbound_federation_restricted_to`. ([\#15913](https://github.com/matrix-org/synapse/issues/15913), [\#15969](https://github.com/matrix-org/synapse/issues/15969)) +- Implement [MSC3814](https://github.com/matrix-org/matrix-spec-proposals/pull/3814), dehydrated devices v2/shrivelled sessions and move [MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) behind a config flag. Contributed by Nico from Famedly, H-Shay and poljar. ([\#15929](https://github.com/matrix-org/synapse/issues/15929))
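
A rough sketch combining the two worker-routing features above; the socket path and worker names are hypothetical, and the keys reflect this editor's reading of the linked listener documentation rather than a verbatim example from it:

```yaml
# Worker config: accept HTTP replication over a Unix socket instead of TCP.
worker_listeners:
  - type: http
    path: /run/synapse/replication.sock   # hypothetical socket path
    resources:
      - names: [replication]

# Shared config: only these workers may send outbound federation traffic.
outbound_federation_restricted_to:
  - federation_sender1
  - federation_sender2
```
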
+ +### Bugfixes + +- Fix a long-standing bug where remote invites weren't correctly pushed. ([\#15820](https://github.com/matrix-org/synapse/issues/15820)) +- Fix background schema updates failing over a large upgrade gap. ([\#15887](https://github.com/matrix-org/synapse/issues/15887)) +- Fix a bug introduced in 1.86.0 where Synapse would fail to start with an empty `experimental_features` configuration setting. ([\#15925](https://github.com/matrix-org/synapse/issues/15925)) +- Fix deploy annotations in the provided Grafana dashboard config, so that they show for any homeserver and not just matrix.org. Contributed by @wrjlewis. ([\#15957](https://github.com/matrix-org/synapse/issues/15957)) +- Ensure a long state resolution does not starve the CPU by occasionally yielding to the reactor. ([\#15960](https://github.com/matrix-org/synapse/issues/15960)) +- Properly handle redactions of creation events. ([\#15973](https://github.com/matrix-org/synapse/issues/15973)) +- Fix a bug where resyncing stale device lists could block responding to federation transactions, and thus delay receiving new data from the remote server. ([\#15975](https://github.com/matrix-org/synapse/issues/15975)) + +### Improved Documentation + +- Better clarify how to run a worker instance (pass both the main and worker configs). ([\#15921](https://github.com/matrix-org/synapse/issues/15921)) +- Improve [the documentation](https://matrix-org.github.io/synapse/v1.89/admin_api/user_admin_api.html#login-as-a-user) for the login as a user admin API. ([\#15938](https://github.com/matrix-org/synapse/issues/15938)) +- Fix broken Arch Linux package link. Contributed by @SnipeXandrej. ([\#15981](https://github.com/matrix-org/synapse/issues/15981)) + +### Deprecations and Removals + +- Remove support for calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15928](https://github.com/matrix-org/synapse/issues/15928)) + +### Internal Changes + +- Mark `get_user_in_directory` private since it is only used in tests. Also remove the cache from it. ([\#15884](https://github.com/matrix-org/synapse/issues/15884)) +- Document which Python version runs on a given Linux distribution so we can more easily clean up later. ([\#15909](https://github.com/matrix-org/synapse/issues/15909)) +- Add details to warning in log when we fail to fetch an alias. ([\#15922](https://github.com/matrix-org/synapse/issues/15922)) +- Remove unneeded `__init__`. ([\#15926](https://github.com/matrix-org/synapse/issues/15926)) +- Fix bug with read/write lock implementation. This is currently unused so has no observable effects. ([\#15933](https://github.com/matrix-org/synapse/issues/15933), [\#15958](https://github.com/matrix-org/synapse/issues/15958)) +- Unbreak the nix development environment by pinning the Rust version to 1.70.0. ([\#15940](https://github.com/matrix-org/synapse/issues/15940)) +- Update presence metrics to differentiate remote vs local users. ([\#15952](https://github.com/matrix-org/synapse/issues/15952)) +- Stop reading from column `user_id` of table `profiles`. ([\#15955](https://github.com/matrix-org/synapse/issues/15955)) +- Build packages for Debian Trixie. ([\#15961](https://github.com/matrix-org/synapse/issues/15961)) +- Reduce the amount of state we pull out. ([\#15968](https://github.com/matrix-org/synapse/issues/15968)) +- Speed up updating state in large rooms. ([\#15971](https://github.com/matrix-org/synapse/issues/15971)) + +### Updates to locked dependencies + +* Bump anyhow from 1.0.71 to 1.0.72. ([\#15949](https://github.com/matrix-org/synapse/issues/15949)) +* Bump click from 8.1.3 to 8.1.6. ([\#15984](https://github.com/matrix-org/synapse/issues/15984)) +* Bump cryptography from 41.0.1 to 41.0.2. ([\#15943](https://github.com/matrix-org/synapse/issues/15943)) +* Bump jsonschema from 4.17.3 to 4.18.3. ([\#15948](https://github.com/matrix-org/synapse/issues/15948)) +* Bump pillow from 9.4.0 to 10.0.0. ([\#15986](https://github.com/matrix-org/synapse/issues/15986)) +* Bump prometheus-client from 0.17.0 to 0.17.1. ([\#15945](https://github.com/matrix-org/synapse/issues/15945)) +* Bump pydantic from 1.10.10 to 1.10.11. ([\#15946](https://github.com/matrix-org/synapse/issues/15946)) +* Bump pygithub from 1.58.2 to 1.59.0. ([\#15834](https://github.com/matrix-org/synapse/issues/15834)) +* Bump pyo3-log from 0.8.2 to 0.8.3. ([\#15951](https://github.com/matrix-org/synapse/issues/15951)) +* Bump sentry-sdk from 1.26.0 to 1.28.1. ([\#15985](https://github.com/matrix-org/synapse/issues/15985)) +* Bump serde_json from 1.0.100 to 1.0.103. ([\#15950](https://github.com/matrix-org/synapse/issues/15950)) +* Bump types-pillow from 9.5.0.4 to 10.0.0.1. ([\#15932](https://github.com/matrix-org/synapse/issues/15932)) +* Bump types-requests from 2.31.0.1 to 2.31.0.2. ([\#15983](https://github.com/matrix-org/synapse/issues/15983)) +* Bump typing-extensions from 4.5.0 to 4.7.1. ([\#15947](https://github.com/matrix-org/synapse/issues/15947)) + +# Synapse 1.88.0 (2023-07-18) + +This release + - raises the minimum supported version of Python to 3.8, as Python 3.7 is now [end-of-life](https://devguide.python.org/versions/), and + - removes deprecated config options related to worker deployment. + +See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#upgrading-to-v1880) for more information. + + +### Bugfixes + +- Revert "Stop writing to column `user_id` of tables `profiles` and `user_filters`", which was introduced in Synapse 1.88.0rc1. ([\#15953](https://github.com/matrix-org/synapse/issues/15953)) + + +# Synapse 1.88.0rc1 (2023-07-11) + +### Features + +- Add `not_user_type` param to the [list accounts admin API](https://matrix-org.github.io/synapse/v1.88/admin_api/user_admin_api.html#list-accounts). ([\#15844](https://github.com/matrix-org/synapse/issues/15844)) + +### Bugfixes + +- Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release. + Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862)) +- Correctly resize thumbnails with Pillow version >= 10.
([\#15876](https://github.com/matrix-org/synapse/issues/15876)) + +### Improved Documentation + +- Fixed header levels on the [Admin API "Users"](https://matrix-org.github.io/synapse/v1.87/admin_api/user_admin_api.html) documentation page. Contributed by @sumnerevans at @beeper. ([\#15852](https://github.com/matrix-org/synapse/issues/15852)) +- Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options. ([\#15872](https://github.com/matrix-org/synapse/issues/15872)) + +### Deprecations and Removals + +- **Remove deprecated `worker_replication_host`, `worker_replication_http_port` and `worker_replication_http_tls` configuration options.** See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.88/docs/upgrade.md#removal-of-worker_replication_-settings) for more details. ([\#15860](https://github.com/matrix-org/synapse/issues/15860)) +- Remove support for Python 3.7 and hence for Debian Buster. ([\#15851](https://github.com/matrix-org/synapse/issues/15851), [\#15892](https://github.com/matrix-org/synapse/issues/15892), [\#15893](https://github.com/matrix-org/synapse/issues/15893), [\#15917](https://github.com/matrix-org/synapse/pull/15917)) + +### Internal Changes + +- Add foreign key constraint to `event_forward_extremities`. ([\#15751](https://github.com/matrix-org/synapse/issues/15751), [\#15907](https://github.com/matrix-org/synapse/issues/15907)) +- Add read/write style cross-worker locks. ([\#15782](https://github.com/matrix-org/synapse/issues/15782)) +- Stop writing to column `user_id` of tables `profiles` and `user_filters`. ([\#15787](https://github.com/matrix-org/synapse/issues/15787)) +- Use lower isolation level when cleaning old presence stream data to avoid serialization errors. ([\#15826](https://github.com/matrix-org/synapse/issues/15826)) +- Add tracing to media `/upload` code paths. ([\#15850](https://github.com/matrix-org/synapse/issues/15850), [\#15888](https://github.com/matrix-org/synapse/issues/15888)) +- Add a timeout that aborts any Postgres statement taking more than 1 hour. ([\#15853](https://github.com/matrix-org/synapse/issues/15853)) +- Fix the `devenv up` configuration which was ignoring the config overrides. ([\#15854](https://github.com/matrix-org/synapse/issues/15854)) +- Optimised cleanup of old entries in `device_lists_stream`. ([\#15861](https://github.com/matrix-org/synapse/issues/15861)) +- Update the Matrix clients link in the _It works! Synapse is running_ landing page. ([\#15874](https://github.com/matrix-org/synapse/issues/15874)) +- Fix building Synapse with the nightly Rust compiler. ([\#15906](https://github.com/matrix-org/synapse/issues/15906)) +- Add `Server` to Access-Control-Expose-Headers header. ([\#15908](https://github.com/matrix-org/synapse/issues/15908)) + +### Updates to locked dependencies + +* Bump authlib from 1.2.0 to 1.2.1. ([\#15864](https://github.com/matrix-org/synapse/issues/15864)) +* Bump importlib-metadata from 6.6.0 to 6.7.0. ([\#15865](https://github.com/matrix-org/synapse/issues/15865)) +* Bump lxml from 4.9.2 to 4.9.3. ([\#15897](https://github.com/matrix-org/synapse/issues/15897)) +* Bump regex from 1.8.4 to 1.9.1. ([\#15902](https://github.com/matrix-org/synapse/issues/15902)) +* Bump ruff from 0.0.275 to 0.0.277. ([\#15900](https://github.com/matrix-org/synapse/issues/15900)) +* Bump sentry-sdk from 1.25.1 to 1.26.0. ([\#15867](https://github.com/matrix-org/synapse/issues/15867)) +* Bump serde_json from 1.0.99 to 1.0.100. 
([\#15901](https://github.com/matrix-org/synapse/issues/15901)) +* Bump types-pyopenssl from 23.2.0.0 to 23.2.0.1. ([\#15866](https://github.com/matrix-org/synapse/issues/15866)) + +# Synapse 1.87.0 (2023-07-04) + +Please note that this will be the last release of Synapse that is compatible with +Python 3.7 and earlier. +This is due to Python 3.7 now having reached End of Life; see our [deprecation policy](https://matrix-org.github.io/synapse/v1.87/deprecation_policy.html) +for more details. + +### Bugfixes + +- Pin `pydantic` to `^1.7.4` to avoid backwards-incompatible API changes from the 2.0.0 release. + Resolves https://github.com/matrix-org/synapse/issues/15858. + Contributed by @PaarthShah. ([\#15862](https://github.com/matrix-org/synapse/issues/15862)) + +### Internal Changes + +- Split out 2022 changes from the changelog so the rendered version in GitHub doesn't time out as much. ([\#15846](https://github.com/matrix-org/synapse/issues/15846)) + + +# Synapse 1.87.0rc1 (2023-06-27) + +### Features + +- Improve `/messages` response time by avoiding backfill when we already have messages to return. ([\#15737](https://github.com/matrix-org/synapse/issues/15737)) +- Add spam checker module API for logins. ([\#15838](https://github.com/matrix-org/synapse/issues/15838)) + +### Bugfixes + +- Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou. ([\#15680](https://github.com/matrix-org/synapse/issues/15680)) +- Avoid invalidating a cache that was just prefilled. ([\#15758](https://github.com/matrix-org/synapse/issues/15758)) +- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15770](https://github.com/matrix-org/synapse/issues/15770)) +- Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper. ([\#15776](https://github.com/matrix-org/synapse/issues/15776)) +- Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`. ([\#15781](https://github.com/matrix-org/synapse/issues/15781)) +- Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend. ([\#15788](https://github.com/matrix-org/synapse/issues/15788)) +- Fix Sytest environment variable evaluation in CI. ([\#15804](https://github.com/matrix-org/synapse/issues/15804)) +- Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly. ([\#15815](https://github.com/matrix-org/synapse/issues/15815)) +- Fix SQLite `user_filters` upgrade introduced in v1.86.0. ([\#15817](https://github.com/matrix-org/synapse/issues/15817)) + +### Improved Documentation + +- Document `looping_call()` functionality that will wait for the given function to finish before scheduling another. ([\#15772](https://github.com/matrix-org/synapse/issues/15772)) +- Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html). ([\#15805](https://github.com/matrix-org/synapse/issues/15805)) +- Fix typo in MSC number in faster remote room join architecture doc.
([\#15812](https://github.com/matrix-org/synapse/issues/15812)) + +### Deprecations and Removals + +- Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms. ([\#15748](https://github.com/matrix-org/synapse/issues/15748)) + +### Internal Changes + +- Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`. ([\#15233](https://github.com/matrix-org/synapse/issues/15233)) +- Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying. ([\#15743](https://github.com/matrix-org/synapse/issues/15743)) +- Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983). ([\#15755](https://github.com/matrix-org/synapse/issues/15755)) +- Allow for the configuration of max request retries and min/max retry delays in the matrix federation client. ([\#15783](https://github.com/matrix-org/synapse/issues/15783)) +- Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic. ([\#15806](https://github.com/matrix-org/synapse/issues/15806)) +- Fix harmless exceptions being printed when running the port DB script. ([\#15814](https://github.com/matrix-org/synapse/issues/15814)) + +### Updates to locked dependencies + +* Bump attrs from 22.2.0 to 23.1.0. ([\#15801](https://github.com/matrix-org/synapse/issues/15801)) +* Bump cryptography from 40.0.2 to 41.0.1. ([\#15800](https://github.com/matrix-org/synapse/issues/15800)) +* Bump ijson from 3.2.0.post0 to 3.2.1. ([\#15802](https://github.com/matrix-org/synapse/issues/15802)) +* Bump phonenumbers from 8.13.13 to 8.13.14. ([\#15798](https://github.com/matrix-org/synapse/issues/15798)) +* Bump ruff from 0.0.265 to 0.0.272. ([\#15799](https://github.com/matrix-org/synapse/issues/15799)) +* Bump ruff from 0.0.272 to 0.0.275. ([\#15833](https://github.com/matrix-org/synapse/issues/15833)) +* Bump serde_json from 1.0.96 to 1.0.97. ([\#15797](https://github.com/matrix-org/synapse/issues/15797)) +* Bump serde_json from 1.0.97 to 1.0.99. ([\#15832](https://github.com/matrix-org/synapse/issues/15832)) +* Bump towncrier from 22.12.0 to 23.6.0. ([\#15831](https://github.com/matrix-org/synapse/issues/15831)) +* Bump types-opentracing from 2.4.10.4 to 2.4.10.5. ([\#15830](https://github.com/matrix-org/synapse/issues/15830)) +* Bump types-setuptools from 67.8.0.0 to 68.0.0.0. ([\#15835](https://github.com/matrix-org/synapse/issues/15835)) + +Synapse 1.86.0 (2023-06-20) +=========================== + +No significant changes since 1.86.0rc2. + + +Synapse 1.86.0rc2 (2023-06-14) +============================== + +Bugfixes +-------- + +- Fix an error when having workers of different versions running. ([\#15774](https://github.com/matrix-org/synapse/issues/15774)) + + +Synapse 1.86.0rc1 (2023-06-13) +============================== + +This version was tagged but never released. + +Features +-------- + +- Stable support for [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#15388](https://github.com/matrix-org/synapse/issues/15388)) +- Support resolving a room's [canonical alias](https://spec.matrix.org/v1.7/client-server-api/#mroomcanonical_alias) via the module API. 
([\#15450](https://github.com/matrix-org/synapse/issues/15450)) +- Enable support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#15520](https://github.com/matrix-org/synapse/issues/15520)) +- Experimental [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) support: delegate auth to an OIDC provider. ([\#15582](https://github.com/matrix-org/synapse/issues/15582)) +- Add Synapse version deploy annotations to the Grafana dashboard, enabling easy correlation between behaviour changes witnessed in a graph and a certain Synapse version, to help nail down regressions. ([\#15674](https://github.com/matrix-org/synapse/issues/15674)) +- Add a catch-all `*` to the supported relation types when redacting an event and its related events. This is an update to the [MSC3912](https://github.com/matrix-org/matrix-spec-proposals/pull/3912) implementation. ([\#15705](https://github.com/matrix-org/synapse/issues/15705)) +- Speed up `/messages` by backfilling in the background when there are no backward extremities where we are directly paginating. ([\#15710](https://github.com/matrix-org/synapse/issues/15710)) +- Expose a metric reporting the database background update status. ([\#15740](https://github.com/matrix-org/synapse/issues/15740)) + + +Bugfixes +-------- + +- Correctly clear caches when we delete a room. ([\#15609](https://github.com/matrix-org/synapse/issues/15609)) +- Check permissions for enabling encryption earlier during room creation to avoid creating broken rooms. ([\#15695](https://github.com/matrix-org/synapse/issues/15695)) + + +Improved Documentation +---------------------- + +- Simplify query to find participating servers in a room. ([\#15732](https://github.com/matrix-org/synapse/issues/15732)) + + +Internal Changes +---------------- + +- Log when events are (maybe unexpectedly) filtered out of responses in tests. ([\#14213](https://github.com/matrix-org/synapse/issues/14213)) +- Read from column `full_user_id` rather than `user_id` of tables `profiles` and `user_filters`. ([\#15649](https://github.com/matrix-org/synapse/issues/15649)) +- Add support for tracing functions which return `Awaitable`s. ([\#15650](https://github.com/matrix-org/synapse/issues/15650)) +- Cache requests for user's devices over federation. ([\#15675](https://github.com/matrix-org/synapse/issues/15675)) +- Add fully qualified docker image names to Dockerfiles. ([\#15689](https://github.com/matrix-org/synapse/issues/15689)) +- Remove some unused code. ([\#15690](https://github.com/matrix-org/synapse/issues/15690)) +- Improve type hints. ([\#15694](https://github.com/matrix-org/synapse/issues/15694), [\#15697](https://github.com/matrix-org/synapse/issues/15697)) +- Update docstring and traces on `maybe_backfill()` functions. ([\#15709](https://github.com/matrix-org/synapse/issues/15709)) +- Add context for when/why to use the `long_retries` option when sending Federation requests. ([\#15721](https://github.com/matrix-org/synapse/issues/15721)) +- Remove some unused fields. ([\#15723](https://github.com/matrix-org/synapse/issues/15723)) +- Update federation error to more plainly explain we can only authorize our own membership events. ([\#15725](https://github.com/matrix-org/synapse/issues/15725)) +- Prevent the `latest_deps` and `twisted_trunk` daily GitHub Actions workflows from running on forks of the codebase. ([\#15726](https://github.com/matrix-org/synapse/issues/15726)) +- Improve performance of user directory search.
([\#15729](https://github.com/matrix-org/synapse/issues/15729)) +- Remove redundant table join with `room_memberships` when doing an `is_host_joined()`/`is_host_invited()` call (`membership` is already part of `current_state_events`). ([\#15731](https://github.com/matrix-org/synapse/issues/15731)) +- Remove superfluous `room_memberships` join from background update. ([\#15733](https://github.com/matrix-org/synapse/issues/15733)) +- Speed up typechecking CI. ([\#15752](https://github.com/matrix-org/synapse/issues/15752)) +- Bump minimum supported Rust version to 1.60.0. ([\#15768](https://github.com/matrix-org/synapse/issues/15768)) + +### Updates to locked dependencies + +* Bump importlib-metadata from 6.1.0 to 6.6.0. ([\#15711](https://github.com/matrix-org/synapse/issues/15711)) +* Bump library/redis from 6-bullseye to 7-bullseye in /docker. ([\#15712](https://github.com/matrix-org/synapse/issues/15712)) +* Bump log from 0.4.18 to 0.4.19. ([\#15761](https://github.com/matrix-org/synapse/issues/15761)) +* Bump phonenumbers from 8.13.11 to 8.13.13. ([\#15763](https://github.com/matrix-org/synapse/issues/15763)) +* Bump pyasn1 from 0.4.8 to 0.5.0. ([\#15713](https://github.com/matrix-org/synapse/issues/15713)) +* Bump pydantic from 1.10.8 to 1.10.9. ([\#15762](https://github.com/matrix-org/synapse/issues/15762)) +* Bump pyo3-log from 0.8.1 to 0.8.2. ([\#15759](https://github.com/matrix-org/synapse/issues/15759)) +* Bump pyopenssl from 23.1.1 to 23.2.0. ([\#15765](https://github.com/matrix-org/synapse/issues/15765)) +* Bump regex from 1.7.3 to 1.8.4. ([\#15769](https://github.com/matrix-org/synapse/issues/15769)) +* Bump sentry-sdk from 1.22.1 to 1.25.0. ([\#15714](https://github.com/matrix-org/synapse/issues/15714)) +* Bump sentry-sdk from 1.25.0 to 1.25.1. ([\#15764](https://github.com/matrix-org/synapse/issues/15764)) +* Bump serde from 1.0.163 to 1.0.164. ([\#15760](https://github.com/matrix-org/synapse/issues/15760)) +* Bump types-jsonschema from 4.17.0.7 to 4.17.0.8. ([\#15716](https://github.com/matrix-org/synapse/issues/15716)) +* Bump types-pyopenssl from 23.1.0.2 to 23.2.0.0. ([\#15766](https://github.com/matrix-org/synapse/issues/15766)) +* Bump types-requests from 2.31.0.0 to 2.31.0.1. ([\#15715](https://github.com/matrix-org/synapse/issues/15715)) + +Synapse 1.85.2 (2023-06-08) +=========================== + +Bugfixes +-------- + +- Fix regression where using TLS for HTTP replication between workers did not work. Introduced in v1.85.0. ([\#15746](https://github.com/matrix-org/synapse/issues/15746)) + + +Synapse 1.85.1 (2023-06-07) +=========================== + +Note: this release only fixes a bug that stopped some deployments from upgrading to v1.85.0. There is no need to upgrade to v1.85.1 if successfully running v1.85.0. + +Bugfixes +-------- + +- Fix bug in schema delta that broke upgrades for some deployments. Introduced in v1.85.0. ([\#15738](https://github.com/matrix-org/synapse/issues/15738), [\#15739](https://github.com/matrix-org/synapse/issues/15739)) + + +Synapse 1.85.0 (2023-06-06) +=========================== + +No significant changes since 1.85.0rc2. + + +## Security advisory + +The following issues are fixed in 1.85.0 (and RCs). + +- [GHSA-26c5-ppr8-f33p](https://github.com/matrix-org/synapse/security/advisories/GHSA-26c5-ppr8-f33p) / [CVE-2023-32682](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32682) — Low Severity + + It may be possible for a deactivated user to log in when using uncommon configurations.
+ +- [GHSA-98px-6486-j7qc](https://github.com/matrix-org/synapse/security/advisories/GHSA-98px-6486-j7qc) / [CVE-2023-32683](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-32683) — Low Severity + + A discovered oEmbed or image URL can bypass the `url_preview_url_blacklist` setting, potentially allowing server-side request forgery or bypassing network policies. Impact is limited to IP addresses allowed by the `url_preview_ip_range_blacklist` setting (by default this only allows public IPs). + +See the advisories for more details. If you have any questions, email security@matrix.org. + + +Synapse 1.85.0rc2 (2023-06-01) +============================== + +Bugfixes +-------- + +- Fix a performance issue introduced in Synapse v1.83.0 which meant that purging rooms was very slow and database-intensive. ([\#15693](https://github.com/matrix-org/synapse/issues/15693)) + + +Deprecations and Removals +------------------------- + +- Deprecate calling the `/register` endpoint with an unspecced `user` property for application services. ([\#15703](https://github.com/matrix-org/synapse/issues/15703)) + + +Internal Changes +---------------- + +- Speed up background jobs `populate_full_user_id_user_filters` and `populate_full_user_id_profiles`. ([\#15700](https://github.com/matrix-org/synapse/issues/15700)) + + +Synapse 1.85.0rc1 (2023-05-30) +============================== + +Features +-------- + +- Improve performance of backfill requests by performing backfill of previously failed requests in the background. ([\#15585](https://github.com/matrix-org/synapse/issues/15585)) +- Add a new [admin API](https://matrix-org.github.io/synapse/v1.85/usage/administration/admin_api/index.html) to [create a new device for a user](https://matrix-org.github.io/synapse/v1.85/admin_api/user_admin_api.html#create-a-device). ([\#15611](https://github.com/matrix-org/synapse/issues/15611)) +- Add Unix socket support for Redis connections. Contributed by Jason Little. (See the sketch after this list.) ([\#15644](https://github.com/matrix-org/synapse/issues/15644))
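
A minimal sketch of pointing Synapse at Redis over a Unix socket, assuming the feature above adds a `path` key to the existing `redis` block; the socket path is a hypothetical placeholder:

```yaml
redis:
  enabled: true
  # Connect via a local Unix socket instead of host/port:
  path: /run/redis/redis.sock
```
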
+ +Bugfixes +-------- + +- Fix a long-standing bug where setting the read marker could fail when using message retention. Contributed by Nick @ Beeper (@fizzadar). ([\#15464](https://github.com/matrix-org/synapse/issues/15464)) +- Fix a long-standing bug where the `url_preview_url_blacklist` configuration setting was not applied to oEmbed or image URLs found while previewing a URL. ([\#15601](https://github.com/matrix-org/synapse/issues/15601)) +- Fix a long-standing bug where filters with multiple backslashes were rejected. ([\#15607](https://github.com/matrix-org/synapse/issues/15607)) +- Fix a bug introduced in Synapse 1.82.0 where the error message displayed when validation of the `app_service_config_files` config option fails would be incorrectly formatted. ([\#15614](https://github.com/matrix-org/synapse/issues/15614)) +- Fix a long-standing bug where deactivated users were still able to log in using the custom `org.matrix.login.jwt` login type (if enabled). ([\#15624](https://github.com/matrix-org/synapse/issues/15624)) +- Fix a long-standing bug where deactivated users were able to log in in uncommon situations. ([\#15634](https://github.com/matrix-org/synapse/issues/15634)) + + +Improved Documentation +---------------------- + +- Warn users that at least 3.75 GB of space is needed for the nix Synapse development environment. ([\#15613](https://github.com/matrix-org/synapse/issues/15613)) +- Remove outdated comment from the generated and sample homeserver log configs. ([\#15648](https://github.com/matrix-org/synapse/issues/15648)) +- Improve contributor docs to make it clearer that Rust is a necessary prerequisite. Contributed by @grantm. ([\#15668](https://github.com/matrix-org/synapse/issues/15668)) + + +Deprecations and Removals +------------------------- + +- Remove the old version of the R30 (30-day retained users) phone-home metric. ([\#10428](https://github.com/matrix-org/synapse/issues/10428)) + + +Internal Changes +---------------- + +- Create dependabot changelogs at release time. ([\#15481](https://github.com/matrix-org/synapse/issues/15481)) +- Add not null constraint to column `full_user_id` of tables `profiles` and `user_filters`. ([\#15537](https://github.com/matrix-org/synapse/issues/15537)) +- Allow connecting to HTTP Replication Endpoints by using `worker_name` when constructing the request. ([\#15578](https://github.com/matrix-org/synapse/issues/15578)) +- Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. ([\#15597](https://github.com/matrix-org/synapse/issues/15597)) +- Run mypy type checking with the minimum supported Python version to catch new usage that isn't backwards-compatible. ([\#15602](https://github.com/matrix-org/synapse/issues/15602)) +- Fix subscriptable type usage in Python <3.9. ([\#15604](https://github.com/matrix-org/synapse/issues/15604)) +- Update internal terminology. ([\#15606](https://github.com/matrix-org/synapse/issues/15606), [\#15620](https://github.com/matrix-org/synapse/issues/15620)) +- Instrument `state` and `state_group` storage-related operations to get a better picture of what's happening when tracing. ([\#15610](https://github.com/matrix-org/synapse/issues/15610), [\#15647](https://github.com/matrix-org/synapse/issues/15647)) +- Trace how many new events from the backfill response we need to process. ([\#15633](https://github.com/matrix-org/synapse/issues/15633)) +- Re-type config paths in `ConfigError`s to be `StrSequence`s instead of `Iterable[str]`s. ([\#15615](https://github.com/matrix-org/synapse/issues/15615)) +- Update Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) implementation to match new proposal text. ([\#15621](https://github.com/matrix-org/synapse/issues/15621)) +- Remove the unstable identifiers from faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#15625](https://github.com/matrix-org/synapse/issues/15625)) +- Fix the olddeps CI. ([\#15626](https://github.com/matrix-org/synapse/issues/15626)) +- Remove duplicate timestamp from test logs (`_trial_temp/test.log`). ([\#15636](https://github.com/matrix-org/synapse/issues/15636)) +- Fix two memory leaks in `trial` test runs. ([\#15630](https://github.com/matrix-org/synapse/issues/15630)) +- Limit the size of the `HomeServerConfig` cache in trial test runs. ([\#15646](https://github.com/matrix-org/synapse/issues/15646)) +- Improve type hints. ([\#15658](https://github.com/matrix-org/synapse/issues/15658), [\#15659](https://github.com/matrix-org/synapse/issues/15659)) +- Add requesting user id parameter to key claim methods in `TransportLayerClient`. ([\#15663](https://github.com/matrix-org/synapse/issues/15663)) +- Speed up rebuilding of the user directory for local users. ([\#15665](https://github.com/matrix-org/synapse/issues/15665)) +- Implement "option 2" for [MSC3820](https://github.com/matrix-org/matrix-spec-proposals/pull/3820): Room version 11.
([\#15666](https://github.com/matrix-org/synapse/issues/15666), [\#15678](https://github.com/matrix-org/synapse/issues/15678)) + +### Updates to locked dependencies + +* Bump furo from 2023.3.27 to 2023.5.20. ([\#15642](https://github.com/matrix-org/synapse/issues/15642)) +* Bump log from 0.4.17 to 0.4.18. ([\#15681](https://github.com/matrix-org/synapse/issues/15681)) +* Bump prometheus-client from 0.16.0 to 0.17.0. ([\#15682](https://github.com/matrix-org/synapse/issues/15682)) +* Bump pydantic from 1.10.7 to 1.10.8. ([\#15685](https://github.com/matrix-org/synapse/issues/15685)) +* Bump pygithub from 1.58.1 to 1.58.2. ([\#15643](https://github.com/matrix-org/synapse/issues/15643)) +* Bump requests from 2.28.2 to 2.31.0. ([\#15651](https://github.com/matrix-org/synapse/issues/15651)) +* Bump sphinx from 6.1.3 to 6.2.1. ([\#15641](https://github.com/matrix-org/synapse/issues/15641)) +* Bump types-bleach from 6.0.0.1 to 6.0.0.3. ([\#15686](https://github.com/matrix-org/synapse/issues/15686)) +* Bump types-pillow from 9.5.0.2 to 9.5.0.4. ([\#15640](https://github.com/matrix-org/synapse/issues/15640)) +* Bump types-pyyaml from 6.0.12.9 to 6.0.12.10. ([\#15683](https://github.com/matrix-org/synapse/issues/15683)) +* Bump types-requests from 2.30.0.0 to 2.31.0.0. ([\#15684](https://github.com/matrix-org/synapse/issues/15684)) +* Bump types-setuptools from 67.7.0.2 to 67.8.0.0. ([\#15639](https://github.com/matrix-org/synapse/issues/15639)) + +Synapse 1.84.1 (2023-05-26) +=========================== + +This patch release fixes a major issue with homeservers that do not have an `instance_map` defined but which do use workers. +If you have already upgraded to Synapse 1.84.0 and your homeserver is working normally, then there is no need to update to this patch release. + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.84.0 where workers do not start up when no `instance_map` was provided. ([\#15672](https://github.com/matrix-org/synapse/issues/15672)) + + +Internal Changes +---------------- + +- Add `dch` and `notify-send` to the development Nix flake so that the release script can be used. ([\#15673](https://github.com/matrix-org/synapse/issues/15673)) + + +Synapse 1.84.0 (2023-05-23) +=========================== + +The `worker_replication_*` configuration settings have been deprecated in favour of configuring the main process consistently with other instances in the `instance_map`. The deprecated settings will be removed in Synapse v1.88.0, but changing your configuration in advance is recommended. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.84/docs/upgrade.md#upgrading-to-v1840) for more information. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.84.0rc1 where errors during startup were not reported correctly on Python < 3.10. ([\#15599](https://github.com/matrix-org/synapse/issues/15599)) + + +Synapse 1.84.0rc1 (2023-05-16) +============================== + +Features +-------- + +- Add an option to prevent media downloads from configured domains. (See the sketch after this list.) ([\#15197](https://github.com/matrix-org/synapse/issues/15197)) +- Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. ([\#15224](https://github.com/matrix-org/synapse/issues/15224)) +- Add Redis TLS configuration options. ([\#15312](https://github.com/matrix-org/synapse/issues/15312)) +- Add a config option to delay push notifications by a random amount, to discourage time-based profiling. ([\#15516](https://github.com/matrix-org/synapse/issues/15516))
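
A rough sketch of the 1.84 configuration additions above; the option and key names here are this editor's reading of the linked changes (in particular `prevent_media_downloads_from` and the Redis TLS keys), so treat them as assumptions to verify against the configuration manual:

```yaml
# Automatically forget rooms when a user leaves or is removed.
forget_rooms_on_leave: true

# Refuse to download remote media from these domains; the option name
# is an assumption based on the linked change.
prevent_media_downloads_from:
  - evil.example.org

# Redis over TLS; key names assumed to mirror Synapse's other TLS settings.
redis:
  enabled: true
  use_tls: true
  certificate_file: /etc/synapse/redis.crt
  private_key_file: /etc/synapse/redis.key
  ca_file: /etc/ssl/certs/ca-certificates.crt
```
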
+- Stabilize support for [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15528](https://github.com/matrix-org/synapse/issues/15528)) +- Implement [MSC4009](https://github.com/matrix-org/matrix-spec-proposals/pull/4009) to expand the supported characters in Matrix IDs. ([\#15536](https://github.com/matrix-org/synapse/issues/15536)) +- Advertise support for Matrix 1.6 on `/_matrix/client/versions`. ([\#15559](https://github.com/matrix-org/synapse/issues/15559)) +- Print full error and stack-trace of any exception that occurs during startup/initialization. ([\#15569](https://github.com/matrix-org/synapse/issues/15569)) + + +Bugfixes +-------- + +- Don't fail on federation over Tor where SRV queries are not supported. Contributed by Zdzichu. ([\#15523](https://github.com/matrix-org/synapse/issues/15523)) +- Experimental support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which rejects setting `m.push_rules` via account data. ([\#15554](https://github.com/matrix-org/synapse/issues/15554), [\#15555](https://github.com/matrix-org/synapse/issues/15555)) +- Fix a long-standing bug where an invalid membership event could cause an internal server error. ([\#15564](https://github.com/matrix-org/synapse/issues/15564)) +- Require at least poetry-core v1.1.0. ([\#15566](https://github.com/matrix-org/synapse/issues/15566), [\#15571](https://github.com/matrix-org/synapse/issues/15571)) + + +Deprecations and Removals +------------------------- + +- Remove the need for `worker_replication_*` based settings in the worker configuration YAML by placing this data directly in the `instance_map` instead. (See the sketch after this list.) ([\#15491](https://github.com/matrix-org/synapse/issues/15491))
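
A minimal sketch of the `instance_map`-based replacement for the `worker_replication_*` settings; host names, port numbers, and the worker name are hypothetical placeholders:

```yaml
# Shared config: tell every process how to reach its peers, including the
# main process, instead of using per-worker worker_replication_* settings.
instance_map:
  main:
    host: synapse-main.internal
    port: 9093
  generic_worker1:
    host: synapse-worker1.internal
    port: 9094
```
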
+ +Updates to the Docker image +--------------------------- + +- Add the `pkg-config` package to Stage 0 so the Dockerfile can be built on the ppc64le architecture. ([\#15567](https://github.com/matrix-org/synapse/issues/15567)) + + +Improved Documentation +---------------------- + +- Clarify documentation of the "Create or modify account" Admin API. ([\#15544](https://github.com/matrix-org/synapse/issues/15544)) +- Fix path to the `statistics/database/rooms` admin API in documentation. ([\#15560](https://github.com/matrix-org/synapse/issues/15560)) +- Update and improve Mastodon Single Sign-On documentation. ([\#15587](https://github.com/matrix-org/synapse/issues/15587)) + + +Internal Changes +---------------- + +- Use oEmbed to generate URL previews for YouTube Shorts. ([\#15025](https://github.com/matrix-org/synapse/issues/15025)) +- Create new `Client` for use with HTTP Replication between workers. Contributed by Jason Little. ([\#15470](https://github.com/matrix-org/synapse/issues/15470)) +- Bump pyicu from 2.10.2 to 2.11. ([\#15509](https://github.com/matrix-org/synapse/issues/15509)) +- Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654). ([\#15522](https://github.com/matrix-org/synapse/issues/15522)) +- Don't use a trusted key server when running the demo scripts. ([\#15527](https://github.com/matrix-org/synapse/issues/15527)) +- Speed up rebuilding of the user directory for local users. ([\#15529](https://github.com/matrix-org/synapse/issues/15529)) +- Speed up deleting of old rows in `event_push_actions`. ([\#15531](https://github.com/matrix-org/synapse/issues/15531)) +- Install the `xmlsec` and `mdbook` packages and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. ([\#15532](https://github.com/matrix-org/synapse/issues/15532), [\#15533](https://github.com/matrix-org/synapse/issues/15533), [\#15545](https://github.com/matrix-org/synapse/issues/15545)) +- Implement [MSC3987](https://github.com/matrix-org/matrix-spec-proposals/pull/3987) by removing `"dont_notify"` from the list of actions in default push rules. ([\#15534](https://github.com/matrix-org/synapse/issues/15534)) +- Move various module API callback registration methods to a dedicated class. ([\#15535](https://github.com/matrix-org/synapse/issues/15535)) +- Proxy `/user/devices` federation queries to application services for [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984). ([\#15539](https://github.com/matrix-org/synapse/issues/15539)) +- Factor out an `is_mine_server_name` method. ([\#15542](https://github.com/matrix-org/synapse/issues/15542)) +- Allow running Complement tests using [podman](https://podman.io/) by adding a `PODMAN` environment variable to `scripts-dev/complement.sh`. ([\#15543](https://github.com/matrix-org/synapse/issues/15543)) +- Bump serde from 1.0.160 to 1.0.162. ([\#15548](https://github.com/matrix-org/synapse/issues/15548)) +- Bump types-setuptools from 67.6.0.5 to 67.7.0.1. ([\#15549](https://github.com/matrix-org/synapse/issues/15549)) +- Bump sentry-sdk from 1.19.1 to 1.22.1. ([\#15550](https://github.com/matrix-org/synapse/issues/15550)) +- Bump ruff from 0.0.259 to 0.0.265. ([\#15551](https://github.com/matrix-org/synapse/issues/15551)) +- Bump hiredis from 2.2.2 to 2.2.3. ([\#15552](https://github.com/matrix-org/synapse/issues/15552)) +- Bump types-requests from 2.29.0.0 to 2.30.0.0. ([\#15553](https://github.com/matrix-org/synapse/issues/15553)) +- Add `org.matrix.msc3981` info to `/_matrix/client/versions`. ([\#15558](https://github.com/matrix-org/synapse/issues/15558)) +- Declare unstable support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) under `/_matrix/client/versions` if the experimental implementation is enabled. ([\#15562](https://github.com/matrix-org/synapse/issues/15562)) +- Implement [MSC3821](https://github.com/matrix-org/matrix-spec-proposals/pull/3821) to update the redaction rules. ([\#15563](https://github.com/matrix-org/synapse/issues/15563)) +- Implement updated redaction rules from [MSC3389](https://github.com/matrix-org/matrix-spec-proposals/pull/3389). ([\#15565](https://github.com/matrix-org/synapse/issues/15565)) +- Allow `pip install` to use setuptools_rust 1.6.0 when building Synapse. ([\#15570](https://github.com/matrix-org/synapse/issues/15570)) +- Deal with upcoming GitHub Actions deprecations. ([\#15576](https://github.com/matrix-org/synapse/issues/15576)) +- Export `run_as_background_process` from the module API. ([\#15577](https://github.com/matrix-org/synapse/issues/15577)) +- Update build system requirements to allow building with poetry-core==1.6.0. ([\#15588](https://github.com/matrix-org/synapse/issues/15588)) +- Bump serde from 1.0.162 to 1.0.163. ([\#15589](https://github.com/matrix-org/synapse/issues/15589)) +- Bump phonenumbers from 8.13.7 to 8.13.11. ([\#15590](https://github.com/matrix-org/synapse/issues/15590)) +- Bump types-psycopg2 from 2.9.21.9 to 2.9.21.10.
([\#15591](https://github.com/matrix-org/synapse/issues/15591)) +- Bump types-commonmark from 0.9.2.2 to 0.9.2.3. ([\#15592](https://github.com/matrix-org/synapse/issues/15592)) +- Bump types-setuptools from 67.7.0.1 to 67.7.0.2. ([\#15594](https://github.com/matrix-org/synapse/issues/15594)) + + +Synapse 1.83.0 (2023-05-09) +=========================== + +No significant changes since 1.83.0rc1. + + +Synapse 1.83.0rc1 (2023-05-02) +============================== + +Features +-------- + +- Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315)) +- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): Scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318)) +- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344)) +- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387)) +- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482)) + + +Bugfixes +-------- + +- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361)) +- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. ([\#15417](https://github.com/matrix-org/synapse/issues/15417)) +- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494)) + + +Improved Documentation +---------------------- + +- Add Nginx loadbalancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411)) +- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498)) + + +Internal Changes +---------------- + +- Speed up tests by caching `HomeServerConfig` instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284)) +- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356)) +- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418)) +- Add column `full_user_id` to tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458)) +- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462)) +- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497)) +- Support claiming more than one OTK at a time.
([\#15468](https://github.com/matrix-org/synapse/issues/15468)) +- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471)) +- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473)) +- Bump cryptography from 40.0.1 to 40.0.2. ([\#15474](https://github.com/matrix-org/synapse/issues/15474)) +- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475)) +- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476)) +- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479)) +- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495)) +- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507)) +- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508)) +- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510)) +- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511)) +- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512)) +- Update the `check_schema_delta` script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466)) + + +Synapse 1.82.0 (2023-04-25) +=========================== + +No significant changes since 1.82.0rc1. + + +Synapse 1.82.0rc1 (2023-04-18) +============================== + +Features +-------- + +- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. ([\#15333](https://github.com/matrix-org/synapse/issues/15333)) +- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431)) +- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436)) + + +Bugfixes +-------- + +- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181)) +- Fix and document the previously untold assumption that `on_logged_out` module hooks will be called before the deletion of pushers. ([\#15410](https://github.com/matrix-org/synapse/issues/15410)) +- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423)) +- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. (See the example after this list.) ([\#15425](https://github.com/matrix-org/synapse/issues/15425)) +- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428))
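
For reference against the startup validation fix above, `app_service_config_files` must be a YAML list of registration file paths; the paths below are hypothetical:

```yaml
# Correct: a list of appservice registration files.
app_service_config_files:
  - /etc/synapse/appservices/bridge1.yaml
  - /etc/synapse/appservices/bridge2.yaml

# Incorrect (now rejected at startup): a bare string.
# app_service_config_files: /etc/synapse/appservices/bridge1.yaml
```
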
+ +Improved Documentation +---------------------- + +- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452)) + + +Deprecations and Removals +------------------------- + +- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405)) + + +Internal Changes +---------------- + +- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372)) +- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373)) +- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374)) +- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375)) +- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376)) +- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404)) +- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412)) +- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413)) +- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414)) +- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415)) +- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441)) +- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442)) +- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443)) +- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444)) +- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445)) +- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446)) +- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447)) +- Bump psycopg2 from 2.9.5 to 2.9.6. ([\#15448](https://github.com/matrix-org/synapse/issues/15448)) +- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429)) +- Implement [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393)) +- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. ([\#15394](https://github.com/matrix-org/synapse/issues/15394)) +- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property. ([\#15395](https://github.com/matrix-org/synapse/issues/15395)) +- Trust dtolnay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406)) +- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409)) +- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421)) +- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427)) +- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432)) +- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433)) +- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435)) +- Disable directory listing for static resources in `/_matrix/static/`.
Synapse 1.81.0 (2023-04-11)
===========================

Synapse now attempts the versioned appservice paths before falling back to the
[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
Usage of the legacy routes should be considered deprecated.

Additionally, Synapse has supported sending the application service access token
via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
query parameter. This is insecure and should be considered deprecated.

A future version of Synapse (v1.88.0 or later) will remove support for legacy
application service routes and query parameter authorization.
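
As a hedged sketch of the transition described above (illustrative, not Synapse's implementation), an appservice might validate the homeserver's token by preferring the spec's `Authorization` header and only then falling back to the deprecated query parameter; the helper name and signature here are hypothetical:

```python
from urllib.parse import parse_qs, urlsplit

def check_homeserver_auth(headers: dict, url: str, hs_token: str) -> bool:
    """Hypothetical appservice-side check: prefer `Authorization: Bearer
    <hs_token>`, then fall back to the deprecated `access_token` query
    parameter (assumes header names arrive with this exact casing)."""
    auth = headers.get("Authorization", "")
    if auth.startswith("Bearer "):
        return auth[len("Bearer "):] == hs_token
    # Deprecated fallback, slated for removal per the note above.
    query = parse_qs(urlsplit(url).query)
    return query.get("access_token", [""])[0] == hs_token
```
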
No significant changes since 1.81.0rc2.


Synapse 1.81.0rc2 (2023-04-06)
==============================

Bugfixes
--------

- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391))


Internal Changes
----------------

- Update CI to run Complement under the latest stable Go version. ([\#15403](https://github.com/matrix-org/synapse/issues/15403))


Synapse 1.81.0rc1 (2023-04-04)
==============================

Features
--------

- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
- Allow loading the `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
- Add experimental support for Unix sockets. Contributed by Jason Little. ([\#15353](https://github.com/matrix-org/synapse/issues/15353))
- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))


Bugfixes
--------

- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
- Add a check to the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) to ensure that the SQLite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
- Fix a long-standing bug where Synapse only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
- Fix a bug in worker mode where, on a rolling restart of workers, the "typing" worker would consume 100% CPU until it was restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
- Fix a long-standing bug where some `to_device` messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
- Fix the missing `app` variable in the mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster-joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))


Improved Documentation
----------------------

- Fix a typo in the login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
- Add some clarification to the doc/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))


Internal Changes
----------------

- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
- Bump sentry-sdk from 1.15.0 to 1.17.0. ([\#15285](https://github.com/matrix-org/synapse/issues/15285))
- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302))
- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303))
- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304))
- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311))
- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316))
- Fix the `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319))
- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324))
- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325))
- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326))
- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327))
- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328))
- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329))
- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330))
- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334))
- Speed up the pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339))
- Speed up the sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340))
- Fix the copyright year in the SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358))
- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369))
- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370))
- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371))
- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385))


Synapse 1.80.0 (2023-03-28)
===========================

No significant changes since 1.80.0rc2.


Synapse 1.80.0rc2 (2023-03-22)
==============================

Bugfixes
--------

- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300))
- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301))


Synapse 1.80.0rc1 (2023-03-21)
==============================

Features
--------

- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `event_property_contains` push condition (see the sketch after this list). ([\#15187](https://github.com/matrix-org/synapse/issues/15187))
- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249))
- Allow loading the `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268))
- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195))
- Add topic and name events to the group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229))
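
To make the stabilised push conditions concrete (`event_property_contains` above, and `event_property_is` from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758), stabilised in 1.79.0), here is roughly the shape such conditions take in a push rule; the examples mirror the spec's mention rules, with dotted keys escaped per MSC3873:

```python
# Roughly the shape of the stabilised push rule conditions; "key" is a
# dotted event-property path in which r"\." escapes a literal dot (MSC3873).
is_room_mention = {
    "kind": "event_property_is",
    "key": r"content.m\.mentions.room",  # the literal property "m.mentions"
    "value": True,
}
is_user_mention = {
    "kind": "event_property_contains",
    "key": r"content.m\.mentions.user_ids",
    "value": "@alice:example.org",  # placeholder user ID
}
```
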
Bugfixes
--------

- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756))
- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190))
- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232))
- Fix a long-standing error when sending a message into a deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235))


Updates to the Docker image
---------------------------

- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239))
- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282))


Improved Documentation
----------------------

- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223))


Internal Changes
----------------

- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921))
- Add `Synapse-Trace-Id` to the `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974))
- Make the `HttpTransactionCache` use the `Requester` in addition to just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200))
- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222))
- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238))
- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237))
- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244))
- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247))
- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262))
- Remove the unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266))
- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269))
- Clean up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274))
- Make the `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275))
- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293))
- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252))
- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253))
- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254))
- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255))
- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256))
- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257))
- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286))
- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287))
- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288))
- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289))
- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290))
- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291))


Synapse 1.79.0 (2023-03-14)
===========================

No significant changes since 1.79.0rc2.


Synapse 1.79.0rc2 (2023-03-13)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register an `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))


Internal Changes
----------------

- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))


Synapse 1.79.0rc1 (2023-03-07)
==============================

Features
--------

- Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
- Experimental support for [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967) to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077))
- Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107))
- Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). ([\#15116](https://github.com/matrix-org/synapse/issues/15116))
- Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133))
- Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134))
- Update support for [MSC2677](https://github.com/matrix-org/matrix-spec-proposals/pull/2677): remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172))
- Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): the `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185))


Bugfixes
--------

- Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869))
- Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088))
- Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092))
- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163))
- Add the `transaction_id` to the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174))
- Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180))
- Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193))
- Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143))


Updates to the Docker image
---------------------------

- Improve startup logging in the with-workers Docker image. ([\#15186](https://github.com/matrix-org/synapse/issues/15186))


Improved Documentation
----------------------

- Document how to use caches in a module. ([\#14026](https://github.com/matrix-org/synapse/issues/14026))
- Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. ([\#15071](https://github.com/matrix-org/synapse/issues/15071))
- Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. ([\#15112](https://github.com/matrix-org/synapse/issues/15112))
- Correct the reference to `federation_verify_certificates` in the configuration documentation. ([\#15139](https://github.com/matrix-org/synapse/issues/15139))
- Correct small documentation errors in some `MatrixFederationHttpClient` methods. ([\#15148](https://github.com/matrix-org/synapse/issues/15148))
- Correct the description of the behavior of `registration_shared_secret_path` on startup. ([\#15168](https://github.com/matrix-org/synapse/issues/15168))


Deprecations and Removals
-------------------------

- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
- Remove the unspecced and buggy `PUT` method on the unstable `/rooms/<room_id>/batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199))


Internal Changes
----------------

- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14101](https://github.com/matrix-org/synapse/issues/14101))
- Batch up storing state groups when creating a new room. ([\#14918](https://github.com/matrix-org/synapse/issues/14918))
- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15051](https://github.com/matrix-org/synapse/issues/15051))
- Refactor writing JSON data in `FileExfiltrationWriter`. ([\#15095](https://github.com/matrix-org/synapse/issues/15095))
- Tighten the login ratelimit defaults. ([\#15135](https://github.com/matrix-org/synapse/issues/15135))
- Fix a typo in an experimental config setting. ([\#15138](https://github.com/matrix-org/synapse/issues/15138))
- Refactor the media modules. ([\#15146](https://github.com/matrix-org/synapse/issues/15146), [\#15175](https://github.com/matrix-org/synapse/issues/15175))
- Improve type hints. ([\#15164](https://github.com/matrix-org/synapse/issues/15164))
- Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. ([\#15165](https://github.com/matrix-org/synapse/issues/15165))
- Remove a dangling reference to being a reference implementation in a docstring. ([\#15167](https://github.com/matrix-org/synapse/issues/15167))
- Add an option to force a rebuild of the "editable" complement image. ([\#15184](https://github.com/matrix-org/synapse/issues/15184))
- Use nightly rustfmt in CI. ([\#15188](https://github.com/matrix-org/synapse/issues/15188))
- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191))
- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192))
- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194))
- Refactor the database transaction for querying users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215))
- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))

<details><summary>Locked dependency updates</summary>

- Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155))
- Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103))
- Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152))
- Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154))
- Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156))
- Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159))
- Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214))
- Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209))
- Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158))
- Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211))
- Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210))
- Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213))
- Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160))
- Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212))
- Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157))
</details>


Synapse 1.78.0 (2023-02-28)
===========================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150))


Synapse 1.78.0rc1 (2023-02-21)
==============================

Features
--------

- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964))
- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969))
- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them (see the sketch after this list). ([\#15004](https://github.com/matrix-org/synapse/issues/15004))
- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034))
- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042))
- Implement the experimental `exact_event_property_contains` push rule condition from [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966). ([\#15045](https://github.com/matrix-org/synapse/issues/15045))
- Remove the spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073))
- Update the error code returned when a user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075))
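
A hedged sketch of the MSC3873 escaping rule referenced above (illustrative, not Synapse's implementation): dotted keys are split on unescaped dots, with `\.` standing for a literal dot and `\\` for a literal backslash:

```python
def split_push_rule_key(key: str) -> list[str]:
    r"""Split a dotted push-rule key per MSC3873: "\." is a literal dot,
    "\\" a literal backslash; unescaped dots separate path segments."""
    parts: list[str] = []
    current = ""
    chars = iter(key)
    for ch in chars:
        if ch == "\\":
            nxt = next(chars, "")
            # Any other escape is kept verbatim (a hedged reading of the MSC).
            current += nxt if nxt in (".", "\\") else "\\" + nxt
        elif ch == ".":
            parts.append(current)
            current = ""
        else:
            current += ch
    parts.append(current)
    return parts

assert split_push_rule_key(r"content.m\.relates_to.rel_type") == [
    "content", "m.relates_to", "rel_type",
]
```
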
Bugfixes
--------

- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779))
- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605))
- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038))
- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068))
- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074))
- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079))
- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080))
- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. ([\#15096](https://github.com/matrix-org/synapse/issues/15096))
- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108))


Improved Documentation
----------------------

- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892), [\#15022](https://github.com/matrix-org/synapse/issues/15022))
- Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959))
- Fix a mistake in the `registration_shared_secret_path` docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078))
- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083))


Internal Changes
----------------

- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755))
- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606))
- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675))
- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742))
- Tweak the comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834))
- Prevent `WARNING: there is already a transaction in progress` lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840))
- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929))
- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973))
- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977))
- Skip calculating unread push actions in `/sync` when `enable_push` is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980))
- Add schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982))
- Improve type hints. ([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084))
- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037))
- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040))
- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041))
- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043))
- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047))
- Refactor the arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053))
- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054))
- Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069))
- Fix a clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070))
- Upper-bound the frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114))
- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120))

<details><summary>Locked dependency updates</summary>

- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059))
- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020))
- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033))
- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060))
- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061))
- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062))
- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063))
- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064))
- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065))
- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099))
- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100))
- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101))
- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102))
- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104))
- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105))
</details>


Synapse 1.77.0 (2023-02-14)
===========================

No significant changes since 1.77.0rc2.


Synapse 1.77.0rc2 (2023-02-10)
==============================

Bugfixes
--------

- Fix a bug introduced in v1.76.0 where retried replication requests would return a failure. ([\#15024](https://github.com/matrix-org/synapse/issues/15024))


Internal Changes
----------------

- Prepare for future database schema changes. ([\#15036](https://github.com/matrix-org/synapse/issues/15036))


Synapse 1.77.0rc1 (2023-02-07)
==============================

Features
--------

- Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions (see the sketch after this list). ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958))
- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016))
- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.77/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894))
- Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962))
- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971))
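
To make the intentional-mentions work above concrete, here is a hedged sketch of the `m.mentions` content block that MSC3952 introduces; the user ID is a placeholder:

```python
# Roughly the event content shape under MSC3952 intentional mentions.
content = {
    "msgtype": "m.text",
    "body": "welcome @alice!",
    "m.mentions": {
        "user_ids": ["@alice:example.org"],  # explicitly mentioned users
        # "room": True would mark an @room mention instead.
    },
}
```
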
Bugfixes
--------

- Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866))
- Fix a bug introduced in Synapse 1.35.0 where the module API's `send_local_online_presence_to` would fail to send presence updates over federation. ([\#14880](https://github.com/matrix-org/synapse/issues/14880))
- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915))
- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on SQLite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926))
- Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942))
- Fix a bug introduced in Synapse 1.64.0 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944))
- Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947))
- Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976))
- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945))


Internal Changes
----------------

- Add missing type hints. ([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956), [\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007))
- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922))
- Allow running the Complement test suites with the asyncio reactor enabled. ([\#14858](https://github.com/matrix-org/synapse/issues/14858))
- Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970))
- Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916))
- Fix a typo in the release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920))
- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949), [\#15019](https://github.com/matrix-org/synapse/issues/15019))
- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953))
- Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954))
- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950))
- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965))
- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014))
- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002))

<details><summary>Locked dependency updates</summary>

- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968))
- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952))
- Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935))
- Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. ([\#14936](https://github.com/matrix-org/synapse/issues/14936))
- Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937))
- Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938))
- Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939))
- Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993))
- Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994))
- Bump prometheus-client from 0.15.0 to 0.16.0. ([\#14995](https://github.com/matrix-org/synapse/issues/14995))
- Bump anyhow from 1.0.68 to 1.0.69. ([\#14996](https://github.com/matrix-org/synapse/issues/14996))
- Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997))
- Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998))
- Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999))
</details>


Synapse 1.76.0 (2023-01-31)
===========================

The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#faster-joins-are-enabled-by-default) for more details.

The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams).

Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132).


Notes on faster joins
---------------------

The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.

After a faster join, Synapse considers that room "partially joined". In this state, you should be able to

- read incoming messages;
- see incoming state changes, e.g. room topic changes; and
- send messages, if the room is unencrypted.

Synapse has to spend more effort to complete the join in the background. Once this finishes, you will be able to

- send messages, if the room is encrypted;
- retrieve room history from before your join, if permitted by the room settings; and
- access the full list of room members.


Improved Documentation
----------------------

- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677))


Synapse 1.76.0rc2 (2023-01-27)
==============================

Bugfixes
--------

- Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. ([\#14914](https://github.com/matrix-org/synapse/issues/14914))


Internal Changes
----------------

- Faster joins: Improve performance of looking up the partial-state status of rooms. ([\#14917](https://github.com/matrix-org/synapse/issues/14917))


Synapse 1.76.0rc1 (2023-01-25)
==============================

Features
--------

- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111))
- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629))
- Add a dedicated listener configuration for the `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747))
- Implement support for [MSC3890](https://github.com/matrix-org/matrix-spec-proposals/pull/3890): Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775))
- Implement experimental support for [MSC3930](https://github.com/matrix-org/matrix-spec-proposals/pull/3930): Push rules for ([MSC3381](https://github.com/matrix-org/matrix-spec-proposals/pull/3381)) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787))
- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811))
- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839))
- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870))
- Faster joins: request partial joins by default. Admins can opt out of this for the time being; see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905))


Bugfixes
--------

- Add an index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799))
- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconnected early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842))
- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820))
- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864))
- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873))
- Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874))
- Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. ([\#14882](https://github.com/matrix-org/synapse/issues/14882))
- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on SQLite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910))


Updates to the Docker image
---------------------------

- Bump the default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875))


Improved Documentation
----------------------

- Include the `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667))
- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773))
- Add missing documentation for `tag` to the `listeners` section. ([\#14803](https://github.com/matrix-org/synapse/issues/14803))
- Update documentation in the configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818))
- Add `worker_manhole` to the configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824))
- Fix the example config missing the `id` field in the [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). ([\#14845](https://github.com/matrix-org/synapse/issues/14845))
- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868))
- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883))


Deprecations and Removals
-------------------------

- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860))


Internal Changes
----------------

- Faster remote room joins (worker mode): do not populate the external hosts-in-room cache when sending events, as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749))
- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752))
- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804))
- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807))
- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889))
- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819))
- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821))
- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822))
- Drop the unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825))
- Merge the two account data and the two device list replication streams. ([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833))
- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841))
- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843))
- Add a check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844))
- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855))
- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872))
- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877))
- Reduce the maximum time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881))
- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912))
- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848))
- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861))
- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862))
- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863))
- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896))
- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897))
- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899))
- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900))
- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901))


Synapse 1.75.0 (2023-01-17)
===========================

No significant changes since 1.75.0rc2.


Synapse 1.75.0rc2 (2023-01-12)
==============================

Bugfixes
--------

- Fix a bug introduced in Synapse 1.75.0rc1 where device lists could be miscalculated with some sync filters. ([\#14810](https://github.com/matrix-org/synapse/issues/14810))
- Fix a race where calling `/members` or `/state` with an `at` parameter could fail for newly created rooms, when using multiple workers. ([\#14817](https://github.com/matrix-org/synapse/issues/14817))


Synapse 1.75.0rc1 (2023-01-10)
==============================

Features
--------

- Add a `cached` function to `synapse.module_api` that returns a decorator to cache return values of functions. ([\#14663](https://github.com/matrix-org/synapse/issues/14663))
- Add experimental support for [MSC3391](https://github.com/matrix-org/matrix-spec-proposals/pull/3391) (removing account data). ([\#14714](https://github.com/matrix-org/synapse/issues/14714))
- Support [RFC7636](https://datatracker.ietf.org/doc/html/rfc7636) Proof Key for Code Exchange for OAuth single sign-on (see the sketch after this list). ([\#14750](https://github.com/matrix-org/synapse/issues/14750))
- Support non-OpenID compliant userinfo claims for subject and picture. ([\#14753](https://github.com/matrix-org/synapse/issues/14753))
- Improve performance of `/sync` when filtering all rooms, message types, or senders. ([\#14786](https://github.com/matrix-org/synapse/issues/14786))
- Improve performance of the `/hierarchy` endpoint. ([\#14263](https://github.com/matrix-org/synapse/issues/14263))
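
As a hedged aside on the PKCE support above (RFC 7636, S256 method), the exchange pairs a random verifier with its SHA-256 challenge; this sketch is illustrative and not Synapse's code:

```python
import base64
import hashlib
import secrets

# The client keeps code_verifier secret and sends code_challenge (plus the
# method "S256") with the authorization request; the later token request
# proves possession by revealing code_verifier.
code_verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=").decode()
code_challenge = (
    base64.urlsafe_b64encode(hashlib.sha256(code_verifier.encode()).digest())
    .rstrip(b"=")
    .decode()
)
```
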
([\#14727](https://github.com/matrix-org/synapse/issues/14727)) +- Fix a bug introduced in Synapse 1.73.0 where the `picture_claim` configured under `oidc_providers` was unused (the default value of `"picture"` was used instead). ([\#14751](https://github.com/matrix-org/synapse/issues/14751)) +- Unescape HTML entities in URL preview titles making use of oEmbed responses. ([\#14781](https://github.com/matrix-org/synapse/issues/14781)) +- Disable sending confirmation email when 3pid is disabled. ([\#14725](https://github.com/matrix-org/synapse/issues/14725)) + + +Improved Documentation +---------------------- + +- Declare support for Python 3.11. ([\#14673](https://github.com/matrix-org/synapse/issues/14673)) +- Fix the description of the `cache_autotune` sub-option `target_cache_memory_usage`, which incorrectly used the name `target_memory_usage`. ([\#14674](https://github.com/matrix-org/synapse/issues/14674)) +- Move `email` to the Server section of the config file documentation. ([\#14730](https://github.com/matrix-org/synapse/issues/14730)) +- Fix broken links in the Synapse documentation. ([\#14744](https://github.com/matrix-org/synapse/issues/14744)) +- Add missing worker settings to shared configuration documentation. ([\#14748](https://github.com/matrix-org/synapse/issues/14748)) +- Document using Twitter as an OAuth 2.0 authentication provider. ([\#14778](https://github.com/matrix-org/synapse/issues/14778)) +- Fix Synapse 1.74 upgrade notes to correctly explain how to install pyICU when installing Synapse from PyPI. ([\#14797](https://github.com/matrix-org/synapse/issues/14797)) +- Update the link to towncrier in the contribution guide. ([\#14801](https://github.com/matrix-org/synapse/issues/14801)) +- Use `htmltest` to check links in the Synapse documentation. ([\#14743](https://github.com/matrix-org/synapse/issues/14743)) + + +Internal Changes +---------------- + +- Faster remote room joins: stream the un-partial-stating of events over replication. ([\#14545](https://github.com/matrix-org/synapse/issues/14545), [\#14546](https://github.com/matrix-org/synapse/issues/14546)) +- Use [ruff](https://github.com/charliermarsh/ruff/) instead of flake8. ([\#14633](https://github.com/matrix-org/synapse/issues/14633), [\#14741](https://github.com/matrix-org/synapse/issues/14741)) +- Change `handle_new_client_event` signature so that a 429 does not reach clients on `PartialStateConflictError`, and internally retry when needed instead. ([\#14665](https://github.com/matrix-org/synapse/issues/14665)) +- Remove dependency on jQuery on the reCAPTCHA page. ([\#14672](https://github.com/matrix-org/synapse/issues/14672)) +- Faster joins: make `compute_state_after_events` consistent with other state-fetching functions that take a `StateFilter`. ([\#14676](https://github.com/matrix-org/synapse/issues/14676)) +- Add missing type hints. ([\#14680](https://github.com/matrix-org/synapse/issues/14680), [\#14681](https://github.com/matrix-org/synapse/issues/14681), [\#14687](https://github.com/matrix-org/synapse/issues/14687)) +- Improve type annotations for the helper methods on a `CachedFunction`. ([\#14685](https://github.com/matrix-org/synapse/issues/14685)) +- Check that the SQLite database file exists before porting to PostgreSQL. ([\#14692](https://github.com/matrix-org/synapse/issues/14692)) +- Add `.direnv/` directory to .gitignore to prevent local state generated by the [direnv](https://direnv.net/) development tool from being committed. 
([\#14707](https://github.com/matrix-org/synapse/issues/14707)) +- Batch up replication requests to request the resyncing of remote users' devices. ([\#14716](https://github.com/matrix-org/synapse/issues/14716)) +- If debug logging is enabled, log the `msgid`s of any to-device messages that are returned over `/sync`. ([\#14724](https://github.com/matrix-org/synapse/issues/14724)) +- Change the GHA CI job to follow best practices. ([\#14772](https://github.com/matrix-org/synapse/issues/14772)) +- Switch to our fork of `dh-virtualenv` to work around an upstream Python 3.11 incompatibility. ([\#14774](https://github.com/matrix-org/synapse/issues/14774)) +- Skip testing built wheels for PyPy 3.7 on Linux x86_64, as we lack new required dependencies in the build environment. ([\#14802](https://github.com/matrix-org/synapse/issues/14802)) + +### Dependabot updates + +<details> + +- Bump JasonEtco/create-an-issue from 2.8.1 to 2.8.2. ([\#14693](https://github.com/matrix-org/synapse/issues/14693)) +- Bump anyhow from 1.0.66 to 1.0.68. ([\#14694](https://github.com/matrix-org/synapse/issues/14694)) +- Bump blake2 from 0.10.5 to 0.10.6. ([\#14695](https://github.com/matrix-org/synapse/issues/14695)) +- Bump serde_json from 1.0.89 to 1.0.91. ([\#14696](https://github.com/matrix-org/synapse/issues/14696)) +- Bump serde from 1.0.150 to 1.0.151. ([\#14697](https://github.com/matrix-org/synapse/issues/14697)) +- Bump lxml from 4.9.1 to 4.9.2. ([\#14698](https://github.com/matrix-org/synapse/issues/14698)) +- Bump types-jsonschema from 4.17.0.1 to 4.17.0.2. ([\#14700](https://github.com/matrix-org/synapse/issues/14700)) +- Bump sentry-sdk from 1.11.1 to 1.12.0. ([\#14701](https://github.com/matrix-org/synapse/issues/14701)) +- Bump types-setuptools from 65.6.0.1 to 65.6.0.2. ([\#14702](https://github.com/matrix-org/synapse/issues/14702)) +- Bump minimum PyYAML to 3.13. ([\#14720](https://github.com/matrix-org/synapse/issues/14720)) +- Bump JasonEtco/create-an-issue from 2.8.2 to 2.9.1. ([\#14731](https://github.com/matrix-org/synapse/issues/14731)) +- Bump towncrier from 22.8.0 to 22.12.0. ([\#14732](https://github.com/matrix-org/synapse/issues/14732)) +- Bump isort from 5.10.1 to 5.11.4. ([\#14733](https://github.com/matrix-org/synapse/issues/14733)) +- Bump attrs from 22.1.0 to 22.2.0. ([\#14734](https://github.com/matrix-org/synapse/issues/14734)) +- Bump black from 22.10.0 to 22.12.0. ([\#14735](https://github.com/matrix-org/synapse/issues/14735)) +- Bump sentry-sdk from 1.12.0 to 1.12.1. ([\#14736](https://github.com/matrix-org/synapse/issues/14736)) +- Bump setuptools from 65.3.0 to 65.5.1. ([\#14738](https://github.com/matrix-org/synapse/issues/14738)) +- Bump serde from 1.0.151 to 1.0.152. ([\#14758](https://github.com/matrix-org/synapse/issues/14758)) +- Bump ruff from 0.0.189 to 0.0.206. ([\#14759](https://github.com/matrix-org/synapse/issues/14759)) +- Bump pydantic from 1.10.2 to 1.10.4. ([\#14760](https://github.com/matrix-org/synapse/issues/14760)) +- Bump gitpython from 3.1.29 to 3.1.30. ([\#14761](https://github.com/matrix-org/synapse/issues/14761)) +- Bump pillow from 9.3.0 to 9.4.0. ([\#14762](https://github.com/matrix-org/synapse/issues/14762)) +- Bump types-requests from 2.28.11.5 to 2.28.11.7. ([\#14763](https://github.com/matrix-org/synapse/issues/14763)) +- Bump dawidd6/action-download-artifact from 2.24.2 to 2.24.3. ([\#14779](https://github.com/matrix-org/synapse/issues/14779)) +- Bump peaceiris/actions-gh-pages from 3.9.0 to 3.9.1. 
([\#14791](https://github.com/matrix-org/synapse/issues/14791)) +- Bump types-pillow from 9.3.0.4 to 9.4.0.0. ([\#14792](https://github.com/matrix-org/synapse/issues/14792)) +- Bump pyopenssl from 22.1.0 to 23.0.0. ([\#14793](https://github.com/matrix-org/synapse/issues/14793)) +- Bump types-setuptools from 65.6.0.2 to 65.6.0.3. ([\#14794](https://github.com/matrix-org/synapse/issues/14794)) +- Bump importlib-metadata from 4.2.0 to 6.0.0. ([\#14795](https://github.com/matrix-org/synapse/issues/14795)) +- Bump ruff from 0.0.206 to 0.0.215. ([\#14796](https://github.com/matrix-org/synapse/issues/14796)) +</details> diff --git a/docs/changelogs/CHANGES-2024.md b/docs/changelogs/CHANGES-2024.md new file mode 100644
index 0000000000..ee354f1573 --- /dev/null +++ b/docs/changelogs/CHANGES-2024.md
@@ -0,0 +1,1586 @@ +# Synapse 1.121.1 (2024-12-11) + +This release contains a fix for our docker build CI. It is functionally identical to 1.121.0, whose changelog is below. + +### Internal Changes + +- Downgrade the Ubuntu GHA runner when building docker images. ([\#18026](https://github.com/element-hq/synapse/issues/18026)) + + + +# Synapse 1.121.0 (2024-12-11) + +### Internal Changes + +- Fix release process to not create duplicate releases. ([\#18025](https://github.com/element-hq/synapse/issues/18025)) + + + +# Synapse 1.121.0rc1 (2024-12-04) + +### Features + +- Support for [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190): device management for Application Services. ([\#17705](https://github.com/element-hq/synapse/issues/17705)) +- Update [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync to include invite, ban, and kick targets when `$LAZY`-loading room members. ([\#17947](https://github.com/element-hq/synapse/issues/17947)) +- Use stable `M_USER_LOCKED` error code for locked accounts, as per [Matrix 1.12](https://spec.matrix.org/v1.12/client-server-api/#account-locking). ([\#17965](https://github.com/element-hq/synapse/issues/17965)) +- [MSC4076](https://github.com/matrix-org/matrix-spec-proposals/pull/4076): Add `disable_badge_count` to pusher configuration. ([\#17975](https://github.com/element-hq/synapse/issues/17975)) + +### Bugfixes + +- Fix long-standing bug where read receipts could be overly delayed when being sent over federation. ([\#17933](https://github.com/element-hq/synapse/issues/17933)) + +### Improved Documentation + +- Add OIDC example configuration for Forgejo (fork of Gitea). ([\#17872](https://github.com/element-hq/synapse/issues/17872)) +- Link to element-docker-demo from contrib/docker*. ([\#17953](https://github.com/element-hq/synapse/issues/17953)) + +### Internal Changes + +- [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108): Add a `Content-Type` header on the `PUT` response to work around a faulty behavior in some caching reverse proxies. ([\#17253](https://github.com/element-hq/synapse/issues/17253)) +- Fix incorrect comment in new schema delta. ([\#17936](https://github.com/element-hq/synapse/issues/17936)) +- Raise setuptools_rust version cap to 1.10.2. ([\#17944](https://github.com/element-hq/synapse/issues/17944)) +- Enable encrypted appservice-related experimental features in the complement docker image. ([\#17945](https://github.com/element-hq/synapse/issues/17945)) +- Return whether the user is suspended when querying the user account in the Admin API. ([\#17952](https://github.com/element-hq/synapse/issues/17952)) +- Fix new scheduled tasks jumping the queue. ([\#17962](https://github.com/element-hq/synapse/issues/17962)) +- Bump pyo3 and dependencies to v0.23.2. ([\#17966](https://github.com/element-hq/synapse/issues/17966)) +- Update setuptools-rust and fix building abi3 wheels in the latest version. ([\#17969](https://github.com/element-hq/synapse/issues/17969)) +- Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})`. ([\#17972](https://github.com/element-hq/synapse/issues/17972)) +- Fix Docker and Complement config to be able to use `public_baseurl`. ([\#17986](https://github.com/element-hq/synapse/issues/17986)) +- Fix building wheels for macOS, which was temporarily disabled in Synapse 1.120.2. ([\#17993](https://github.com/element-hq/synapse/issues/17993)) +- Fix release process to not create duplicate releases. 
([\#17970](https://github.com/element-hq/synapse/issues/17970), [\#17995](https://github.com/element-hq/synapse/issues/17995)) + + +### Updates to locked dependencies + +* Bump bytes from 1.8.0 to 1.9.0. ([\#17982](https://github.com/element-hq/synapse/issues/17982)) +* Bump pysaml2 from 7.3.1 to 7.5.0. ([\#17978](https://github.com/element-hq/synapse/issues/17978)) +* Bump serde_json from 1.0.132 to 1.0.133. ([\#17939](https://github.com/element-hq/synapse/issues/17939)) +* Bump tomli from 2.0.2 to 2.1.0. ([\#17959](https://github.com/element-hq/synapse/issues/17959)) +* Bump tomli from 2.1.0 to 2.2.1. ([\#17979](https://github.com/element-hq/synapse/issues/17979)) +* Bump tornado from 6.4.1 to 6.4.2. ([\#17955](https://github.com/element-hq/synapse/issues/17955)) + +# Synapse 1.120.2 (2024-12-03) + +This version has building of wheels for macOS disabled. +It is functionally identical to 1.120.1, which contains multiple security fixes. +If you are already using 1.120.1, there is no need to upgrade to this version. + + + +# Synapse 1.120.1 (2024-12-03) + +This patch release fixes multiple security vulnerabilities, some affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild. + +Administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below. + +### Security advisory + +The following issues are fixed in 1.120.1. + +- [GHSA-rfq8-j7rh-8hf2](https://github.com/element-hq/synapse/security/advisories/GHSA-rfq8-j7rh-8hf2) / [CVE-2024-52805](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-52805): **Unsupported content types can lead to memory exhaustion** + + Synapse instances which have a high `max_upload_size` and which don't have a reverse proxy in front of them that would otherwise limit upload size are affected. + + Fixed by [4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf](https://github.com/element-hq/synapse/commit/4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf). + +- [GHSA-f3r3-h2mq-hx2h](https://github.com/element-hq/synapse/security/advisories/GHSA-f3r3-h2mq-hx2h) / [CVE-2024-52815](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-52815): **Malicious invites via federation can break a user's sync** + + Fixed by [d82e1ed357b7ee21dff83d06cba7a67840cfd464](https://github.com/element-hq/synapse/commit/d82e1ed357b7ee21dff83d06cba7a67840cfd464). + +- [GHSA-vp6v-whfm-rv3g](https://github.com/element-hq/synapse/security/advisories/GHSA-vp6v-whfm-rv3g) / [CVE-2024-53863](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-53863): **Synapse can be forced to thumbnail unexpected file formats, invoking potentially untrustworthy decoders** + + Synapse instances can disable dynamic thumbnailing by setting `dynamic_thumbnails` to `false` in the configuration file. + + Fixed by [b64a4e5fbbbf119b6c65aedf0d999b4237d55503](https://github.com/element-hq/synapse/commit/b64a4e5fbbbf119b6c65aedf0d999b4237d55503). + +- [GHSA-56w4-5538-8v8h](https://github.com/element-hq/synapse/security/advisories/GHSA-56w4-5538-8v8h) / [CVE-2024-53867](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-53867): **The Sliding Sync feature on Synapse versions between 1.113.0rc1 and 1.120.0 can leak partial room state changes to users no longer in a room** + + Non-state events, like messages, are unaffected. 
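+
+  For administrators who cannot upgrade immediately, the two `homeserver.yaml` workarounds mentioned in these advisories (this one is spelled out in the note that follows) might look like the following minimal sketch; the option names come from the advisories themselves, and all other settings are assumed to keep their existing values:
+
+  ```yaml
+  # Workaround sketch only; upgrading to 1.120.1 remains the recommended fix.
+
+  # CVE-2024-53863: disable dynamic thumbnailing.
+  dynamic_thumbnails: false
+
+  # CVE-2024-53867: disable the experimental Sliding Sync feature.
+  experimental_features:
+    msc3575_enabled: false
+  ```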
+ + Synapse instances can disable the Sliding Sync feature by setting `experimental_features.msc3575_enabled` to `false` in the configuration file. + + Fixed by [4daa533e82f345ce87b9495d31781af570ba3ead](https://github.com/element-hq/synapse/commit/4daa533e82f345ce87b9495d31781af570ba3ead). + +See the advisories for more details. If you have any questions, email [security at element.io](mailto:security@element.io). + +### Bugfixes + +- Fix release process to not create duplicate releases. ([\#17970](https://github.com/element-hq/synapse/issues/17970)) + + + +# Synapse 1.120.0 (2024-11-26) + +### Bugfixes + +- Fix a bug introduced in Synapse v1.120rc1 which would cause the newly-introduced `delete_old_otks` job to fail in worker-mode deployments. ([\#17960](https://github.com/element-hq/synapse/issues/17960)) + + + + +# Synapse 1.120.0rc1 (2024-11-20) + +This release enables the enforcement of authenticated media by default, with exemptions for media that is already present in the +homeserver's media store. + +Most homeservers operating in the public federation will not be impacted by this change, given that +the large homeserver `matrix.org` enabled this in September 2024 and therefore most clients and servers +will already have updated as a result. + +Some server administrators may still wish to disable this enforcement for the time being, in the interest of compatibility with older clients +and older federated homeservers. +See the [upgrade notes](https://element-hq.github.io/synapse/v1.120/upgrade.html#authenticated-media-is-now-enforced-by-default) for more information.
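+
+For administrators who do wish to delay enforcement, the revert described in the Features entry below is a single option in `homeserver.yaml`; a minimal sketch (note that, per that entry, the option is slated for eventual removal):
+
+```yaml
+# Temporarily keep serving unauthenticated media; this option will be
+# removed in a future release, at which point enforcement is always-on.
+enable_authenticated_media: false
+```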
+ +### Features + +- Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. ([\#17889](https://github.com/element-hq/synapse/issues/17889)) +- Add a one-off task to delete old One-Time Keys, to guard against old OTKs lingering in the database after the client has long forgotten about them. ([\#17934](https://github.com/element-hq/synapse/issues/17934)) + +### Improved Documentation + +- Clarify the semantics of the `enable_authenticated_media` configuration option. ([\#17913](https://github.com/element-hq/synapse/issues/17913)) +- Add documentation about backing up Synapse. ([\#17931](https://github.com/element-hq/synapse/issues/17931)) + +### Deprecations and Removals + +- Remove support for [MSC3886: Simple client rendezvous capability](https://github.com/matrix-org/matrix-spec-proposals/pull/3886), which has been superseded by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) and therefore closed. ([\#17638](https://github.com/element-hq/synapse/issues/17638)) + +### Internal Changes + +- Address some typos in the docs and in the error message returned for an unknown MXC ID. ([\#17865](https://github.com/element-hq/synapse/issues/17865)) +- Unpin the upload release GHA action. ([\#17923](https://github.com/element-hq/synapse/issues/17923)) +- Bump the macOS version used to build wheels during release, as the current version is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924)) +- Move server event filtering logic to Rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928)) +- Support the new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around a name conflict with PyPI package `multipart`. ([\#17932](https://github.com/element-hq/synapse/issues/17932)) +- Speed up slow initial sliding syncs on large servers. ([\#17946](https://github.com/element-hq/synapse/issues/17946)) + +### Updates to locked dependencies + +* Bump anyhow from 1.0.92 to 1.0.93. ([\#17920](https://github.com/element-hq/synapse/issues/17920)) +* Bump bleach from 6.1.0 to 6.2.0. ([\#17918](https://github.com/element-hq/synapse/issues/17918)) +* Bump immutabledict from 4.2.0 to 4.2.1. ([\#17941](https://github.com/element-hq/synapse/issues/17941)) +* Bump packaging from 24.1 to 24.2. ([\#17940](https://github.com/element-hq/synapse/issues/17940)) +* Bump phonenumbers from 8.13.49 to 8.13.50. ([\#17942](https://github.com/element-hq/synapse/issues/17942)) +* Bump pygithub from 2.4.0 to 2.5.0. ([\#17917](https://github.com/element-hq/synapse/issues/17917)) +* Bump ruff from 0.7.2 to 0.7.3. ([\#17919](https://github.com/element-hq/synapse/issues/17919)) +* Bump serde from 1.0.214 to 1.0.215. ([\#17938](https://github.com/element-hq/synapse/issues/17938)) + +# Synapse 1.119.0 (2024-11-13) + +No significant changes since 1.119.0rc2. + +### Python 3.8 support dropped + +Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9. + +If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse. + + +# Synapse 1.119.0rc2 (2024-11-11) + +Note that due to packaging issues there was no v1.119.0rc1. + + +### Features + +- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374)) +- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888)) + +### Bugfixes + +- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809)) +- Check if a user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839)) +- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in + the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847)) +- Fix bug where some presence and typing timeouts could expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850)) +- Fix detection of whether the built Rust library is outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861)) +- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903)) +- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy-loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915)) + +### Internal Changes + +- Remove support for Python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908)) +- Add a test for downloading and thumbnailing a CMYK JPEG. 
([\#17786](https://github.com/element-hq/synapse/issues/17786)) +- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890)) +- Include the destination in the 'Destination mismatch' error on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830)) +- The nix flake inside the repository no longer tracks nixpkgs/master, so that it does not pick up the latest bugs from a PR merged five minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852)) +- Minor speed-up of sliding sync by computing extension results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884)) +- Bump the default Python version in the Synapse Dockerfile from 3.11 to 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887)) +- Remove usage of an internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894)) +- Use a unique name for each os.arch variant when uploading wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905)) +- Fix tests to run with the latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911)) +- Update the version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902)) +- Update the portdb CI to use Python 3.13 and Postgres 17 as the latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909)) +- Add an index to the `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912)) +- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921)) + +### Updates to locked dependencies + +* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657)) +* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901)) +* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877)) +* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853)) +* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898)) +* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899)) +* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879)) +* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874)) +* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897)) +* Bump serde from 1.0.210 to 1.0.214. 
([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900)) +* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857)) +* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855)) +* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856)) + +# Synapse 1.118.0 (2024-10-29) + +No significant changes since 1.118.0rc1. + +### Python 3.8 support will be dropped in the next release + +Python 3.8 is now [end-of-life](https://devguide.python.org/versions/). As per our [Deprecation Policy for Platform Dependencies](https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy), Synapse will be dropping support for Python 3.8 in the next release, Synapse 1.119.0. + +Synapse 1.118.x will be the final release to support Python 3.8. If you are running Synapse with Python 3.8, please upgrade before the 1.119.0 release, due in less than one month. + +### Python 3.13 and PostgreSQL 17 support + +On the other end of the spectrum, Synapse 1.118.0 is the first release to support [Python 3.13](https://www.python.org/downloads/release/python-3130/)! [PostgreSQL 17](https://www.postgresql.org/about/news/postgresql-17-released-2936/) is also supported as of this release. + + +# Synapse 1.118.0rc1 (2024-10-22) + +### Features + +- Add the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload. ([\#17708](https://github.com/element-hq/synapse/issues/17708)) +- Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper. ([\#17783](https://github.com/element-hq/synapse/issues/17783)) + +### Bugfixes + +- Fix saving of PNG thumbnails when the original image is in the CMYK color space. ([\#17736](https://github.com/element-hq/synapse/issues/17736)) +- Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. ([\#17785](https://github.com/element-hq/synapse/issues/17785), [\#17805](https://github.com/element-hq/synapse/issues/17805)) +- Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync that would cause rooms to stay forgotten and hidden even after rejoining. ([\#17835](https://github.com/element-hq/synapse/issues/17835)) + +### Improved Documentation + +- Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called. ([\#17627](https://github.com/element-hq/synapse/issues/17627)) +- Correct documentation to refer to the `--config-path` argument instead of `--config-file`. ([\#17802](https://github.com/element-hq/synapse/issues/17802)) +- Fix typo in `target_cache_memory_usage` docs. ([\#17825](https://github.com/element-hq/synapse/issues/17825)) + +### Internal Changes + +- Slight optimization when fetching state/events for Sliding Sync. ([\#17718](https://github.com/element-hq/synapse/issues/17718)) +- Add Python 3.13 and Postgres 17 to the test matrix. ([\#17752](https://github.com/element-hq/synapse/issues/17752)) +- Test github token before running release script steps. ([\#17803](https://github.com/element-hq/synapse/issues/17803)) +- Build debian packages for new Ubuntu versions, and stop building them for versions that are no longer supported. 
([\#17824](https://github.com/element-hq/synapse/issues/17824)) +- Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect. ([\#17826](https://github.com/element-hq/synapse/issues/17826)) +- Fix some typing issues uncovered by upgrading mypy to 1.11.x. ([\#17842](https://github.com/element-hq/synapse/issues/17842)) + + + +### Updates to locked dependencies + +* Bump mypy from 1.10.1 to 1.11.2. ([\#17842](https://github.com/element-hq/synapse/issues/17842)) +* Bump mypy-zope from 1.0.5 to 1.0.7. ([\#17827](https://github.com/element-hq/synapse/issues/17827)) +* Bump phonenumbers from 8.13.46 to 8.13.47. ([\#17797](https://github.com/element-hq/synapse/issues/17797)) +* Bump psycopg2 from 2.9.9 to 2.9.10. ([\#17843](https://github.com/element-hq/synapse/issues/17843)) +* Bump ruff from 0.6.8 to 0.6.9. ([\#17794](https://github.com/element-hq/synapse/issues/17794)) +* Bump sentry-sdk from 2.14.0 to 2.15.0. ([\#17795](https://github.com/element-hq/synapse/issues/17795)) +* Bump sentry-sdk from 2.15.0 to 2.16.0. ([\#17829](https://github.com/element-hq/synapse/issues/17829)) +* Bump sentry-sdk from 2.16.0 to 2.17.0. ([\#17844](https://github.com/element-hq/synapse/issues/17844)) +* Bump sigstore/cosign-installer from 3.6.0 to 3.7.0. ([\#17798](https://github.com/element-hq/synapse/issues/17798)) +* Bump tomli from 2.0.1 to 2.0.2. ([\#17796](https://github.com/element-hq/synapse/issues/17796)) +* Bump types-requests from 2.32.0.20240914 to 2.32.0.20241016. ([\#17841](https://github.com/element-hq/synapse/issues/17841)) +* Bump types-setuptools from 75.1.0.20240917 to 75.1.0.20241014. ([\#17828](https://github.com/element-hq/synapse/issues/17828)) + +# Synapse 1.117.0 (2024-10-15) + +No significant changes since 1.117.0rc1. + + + + +# Synapse 1.117.0rc1 (2024-10-08) + +### Features + +- Add config option `redis.password_path`. ([\#17717](https://github.com/element-hq/synapse/issues/17717)) + +### Bugfixes + +- Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error. ([\#17779](https://github.com/element-hq/synapse/issues/17779)) +- In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140) to `false` when server configuration disables support for delayed events. ([\#17780](https://github.com/element-hq/synapse/issues/17780)) +- Improve input validation and room membership checks in admin redaction API. ([\#17792](https://github.com/element-hq/synapse/issues/17792)) + +### Improved Documentation + +- Clarify the docstring of `test_forget_when_not_left`. ([\#17628](https://github.com/element-hq/synapse/issues/17628)) +- Add documentation note about PYTHONMALLOC for accurate jemalloc memory tracking. Contributed by @hensg. ([\#17709](https://github.com/element-hq/synapse/issues/17709)) +- Remove spurious "TODO UPDATE ALL THIS" note in the Debian installation docs. ([\#17749](https://github.com/element-hq/synapse/issues/17749)) +- Explain how load balancing works for `federation_sender_instances`. ([\#17776](https://github.com/element-hq/synapse/issues/17776)) + +### Internal Changes + +- Minor performance increase for large accounts using sliding sync. 
([\#17751](https://github.com/element-hq/synapse/issues/17751)) +- Increase performance of the notifier when there are many syncing users. ([\#17765](https://github.com/element-hq/synapse/issues/17765), [\#17766](https://github.com/element-hq/synapse/issues/17766)) +- Fix performance of streams that don't change often. ([\#17767](https://github.com/element-hq/synapse/issues/17767)) +- Improve performance of sliding sync connections that do not ask for any rooms. ([\#17768](https://github.com/element-hq/synapse/issues/17768)) +- Reduce overhead of sliding sync E2EE loops. ([\#17771](https://github.com/element-hq/synapse/issues/17771)) +- Sliding sync minor performance speed up using new table. ([\#17787](https://github.com/element-hq/synapse/issues/17787)) +- Sliding sync minor performance improvement by omitting unchanged data from incremental responses. ([\#17788](https://github.com/element-hq/synapse/issues/17788)) +- Speed up sliding sync when there are many active subscriptions. ([\#17789](https://github.com/element-hq/synapse/issues/17789)) +- Add missing license headers on new source files. ([\#17799](https://github.com/element-hq/synapse/issues/17799)) + + + +### Updates to locked dependencies + +* Bump phonenumbers from 8.13.45 to 8.13.46. ([\#17773](https://github.com/element-hq/synapse/issues/17773)) +* Bump python-multipart from 0.0.10 to 0.0.12. ([\#17772](https://github.com/element-hq/synapse/issues/17772)) +* Bump regex from 1.10.6 to 1.11.0. ([\#17770](https://github.com/element-hq/synapse/issues/17770)) +* Bump ruff from 0.6.7 to 0.6.8. ([\#17774](https://github.com/element-hq/synapse/issues/17774)) + +# Synapse 1.116.0 (2024-10-01) + +No significant changes since 1.116.0rc2. + + + + +# Synapse 1.116.0rc2 (2024-09-26) + +### Features + +- Add implementation of restricting who can overwrite a state event as proposed by [MSC3757](https://github.com/matrix-org/matrix-spec-proposals/pull/3757). ([\#17513](https://github.com/element-hq/synapse/issues/17513)) + + + + +# Synapse 1.116.0rc1 (2024-09-25) + +### Features + +- Add initial implementation of delayed events as proposed by [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). ([\#17326](https://github.com/element-hq/synapse/issues/17326)) +- Add an asynchronous Admin API endpoint [to redact all a user's events](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#redact-all-the-events-of-a-user), + and [an endpoint to check on the status of that redaction task](https://element-hq.github.io/synapse/v1.116/admin_api/user_admin_api.html#check-the-status-of-a-redaction-process). ([\#17506](https://github.com/element-hq/synapse/issues/17506)) +- Add support for the `tags` and `not_tags` filters for [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17662](https://github.com/element-hq/synapse/issues/17662)) +- Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). ([\#17675](https://github.com/element-hq/synapse/issues/17675)) +- Add config option `turn_shared_secret_path`. ([\#17690](https://github.com/element-hq/synapse/issues/17690)) +- Return room tags in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync account data extension. 
([\#17707](https://github.com/element-hq/synapse/issues/17707)) + +### Bugfixes + +- Make sure we get up-to-date state information when using the new [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync tables to derive room membership. ([\#17692](https://github.com/element-hq/synapse/issues/17692)) +- Fix bug where room account data would not correctly be sent down [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync for old rooms. ([\#17695](https://github.com/element-hq/synapse/issues/17695)) +- Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync which could prevent /sync from working for certain user accounts. ([\#17727](https://github.com/element-hq/synapse/issues/17727), [\#17733](https://github.com/element-hq/synapse/issues/17733)) +- Ignore invites from ignored users in Sliding Sync. ([\#17729](https://github.com/element-hq/synapse/issues/17729)) +- Fix bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync where the server would incorrectly return a negative bump stamp, which caused Element X apps to stop syncing. ([\#17748](https://github.com/element-hq/synapse/issues/17748)) + +### Internal Changes + +- Import pydantic objects from the `_pydantic_compat` module. + This allows `check_pydantic_models.py` to mock those pydantic objects + only in the synapse module, and not interfere with pydantic objects in + external dependencies. ([\#17667](https://github.com/element-hq/synapse/issues/17667)) +- Use [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync tables as a bulk shortcut for getting the max `event_stream_ordering` of rooms. ([\#17693](https://github.com/element-hq/synapse/issues/17693)) +- Speed up [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync requests a bit where there are many room changes. ([\#17696](https://github.com/element-hq/synapse/issues/17696)) +- Refactor [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync filter unit tests so the sliding sync API has better test coverage. ([\#17703](https://github.com/element-hq/synapse/issues/17703)) +- Fetch `bump_stamp`s more efficiently in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17723](https://github.com/element-hq/synapse/issues/17723)) +- Shortcut for checking if certain background updates have completed (utilized in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync). ([\#17724](https://github.com/element-hq/synapse/issues/17724)) +- More efficiently fetch rooms for [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17725](https://github.com/element-hq/synapse/issues/17725)) +- Fix `_bulk_get_max_event_pos` being inefficient. ([\#17728](https://github.com/element-hq/synapse/issues/17728)) +- Add cache to `get_tags_for_room(...)`. ([\#17730](https://github.com/element-hq/synapse/issues/17730)) +- Small performance improvement in speeding up [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync. ([\#17731](https://github.com/element-hq/synapse/issues/17731)) +- Minor speed up of initial [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) sliding sync requests. 
([\#17734](https://github.com/element-hq/synapse/issues/17734)) +- Remove usage of the deprecated `cgi` module, deprecated in Python 3.11 and removed in Python 3.13. ([\#17741](https://github.com/element-hq/synapse/issues/17741)) +- Fix typing of a variable that is not `Unknown` anymore after updating `treq`. ([\#17744](https://github.com/element-hq/synapse/issues/17744)) + + + +### Updates to locked dependencies + +* Bump anyhow from 1.0.86 to 1.0.89. ([\#17685](https://github.com/element-hq/synapse/issues/17685), [\#17716](https://github.com/element-hq/synapse/issues/17716)) +* Bump bytes from 1.7.1 to 1.7.2. ([\#17743](https://github.com/element-hq/synapse/issues/17743)) +* Bump cryptography from 43.0.0 to 43.0.1. ([\#17689](https://github.com/element-hq/synapse/issues/17689)) +* Bump idna from 3.8 to 3.10. ([\#17758](https://github.com/element-hq/synapse/issues/17758)) +* Bump msgpack from 1.0.8 to 1.1.0. ([\#17759](https://github.com/element-hq/synapse/issues/17759)) +* Bump phonenumbers from 8.13.44 to 8.13.45. ([\#17762](https://github.com/element-hq/synapse/issues/17762)) +* Bump prometheus-client from 0.20.0 to 0.21.0. ([\#17746](https://github.com/element-hq/synapse/issues/17746)) +* Bump pyasn1 from 0.6.0 to 0.6.1. ([\#17714](https://github.com/element-hq/synapse/issues/17714)) +* Bump pyasn1-modules from 0.4.0 to 0.4.1. ([\#17747](https://github.com/element-hq/synapse/issues/17747)) +* Bump pydantic from 2.8.2 to 2.9.2. ([\#17756](https://github.com/element-hq/synapse/issues/17756)) +* Bump python-multipart from 0.0.9 to 0.0.10. ([\#17745](https://github.com/element-hq/synapse/issues/17745)) +* Bump ruff from 0.6.4 to 0.6.7. ([\#17715](https://github.com/element-hq/synapse/issues/17715), [\#17760](https://github.com/element-hq/synapse/issues/17760)) +* Bump sentry-sdk from 2.13.0 to 2.14.0. ([\#17712](https://github.com/element-hq/synapse/issues/17712)) +* Bump serde from 1.0.209 to 1.0.210. ([\#17686](https://github.com/element-hq/synapse/issues/17686)) +* Bump serde_json from 1.0.127 to 1.0.128. ([\#17687](https://github.com/element-hq/synapse/issues/17687)) +* Bump treq from 23.11.0 to 24.9.1. ([\#17744](https://github.com/element-hq/synapse/issues/17744)) +* Bump types-pyyaml from 6.0.12.20240808 to 6.0.12.20240917. ([\#17755](https://github.com/element-hq/synapse/issues/17755)) +* Bump types-requests from 2.32.0.20240712 to 2.32.0.20240914. ([\#17713](https://github.com/element-hq/synapse/issues/17713)) +* Bump types-setuptools from 74.1.0.20240907 to 75.1.0.20240917. ([\#17757](https://github.com/element-hq/synapse/issues/17757)) + +# Synapse 1.115.0 (2024-09-17) + +No significant changes since 1.115.0rc2. + + + + +# Synapse 1.115.0rc2 (2024-09-12) + +### Internal Changes + +- Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17652](https://github.com/element-hq/synapse/issues/17652)) +- Speed up sliding sync by reducing amount of data pulled out of the database for large rooms. ([\#17683](https://github.com/element-hq/synapse/issues/17683)) + + + + +# Synapse 1.115.0rc1 (2024-09-10) + +### Features + +- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. 
([\#17509](https://github.com/element-hq/synapse/issues/17509)) + +### Bugfixes + +- Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. ([\#17607](https://github.com/element-hq/synapse/issues/17607)) +- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) +- Fix bug where we returned the wrong `bump_stamp` for invites in the sliding sync response, causing incorrect ordering of invites in the room list. ([\#17674](https://github.com/element-hq/synapse/issues/17674)) + +### Improved Documentation + +- Clarify that the admin API resource is only loaded on the main process, and not on workers. ([\#17590](https://github.com/element-hq/synapse/issues/17590)) +- Fix typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). ([\#17594](https://github.com/element-hq/synapse/issues/17594)) + +### Deprecations and Removals + +- Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. ([\#17650](https://github.com/element-hq/synapse/issues/17650)) + +### Internal Changes + +- Update [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) +- Pre-populate room data used in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17512](https://github.com/element-hq/synapse/issues/17512), [\#17632](https://github.com/element-hq/synapse/issues/17632), [\#17633](https://github.com/element-hq/synapse/issues/17633), [\#17634](https://github.com/element-hq/synapse/issues/17634), [\#17635](https://github.com/element-hq/synapse/issues/17635), [\#17636](https://github.com/element-hq/synapse/issues/17636), [\#17641](https://github.com/element-hq/synapse/issues/17641), [\#17654](https://github.com/element-hq/synapse/issues/17654), [\#17673](https://github.com/element-hq/synapse/issues/17673)) +- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599), [\#17631](https://github.com/element-hq/synapse/issues/17631)) +- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600)) +- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620), [\#17643](https://github.com/element-hq/synapse/issues/17643)) +- Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. ([\#17629](https://github.com/element-hq/synapse/issues/17629)) +- Use new database tables for sliding sync. ([\#17630](https://github.com/element-hq/synapse/issues/17630), [\#17649](https://github.com/element-hq/synapse/issues/17649)) +- Prevent duplicate tags being added to Sliding Sync traces. ([\#17655](https://github.com/element-hq/synapse/issues/17655)) +- Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512), which should be faster. ([\#17658](https://github.com/element-hq/synapse/issues/17658)) +- Speed up incremental Sliding Sync requests by avoiding extra work. 
([\#17665](https://github.com/element-hq/synapse/issues/17665)) +- Small performance improvement in speeding up sliding sync. ([\#17666](https://github.com/element-hq/synapse/issues/17666), [\#17670](https://github.com/element-hq/synapse/issues/17670), [\#17672](https://github.com/element-hq/synapse/issues/17672)) +- Speed up sliding sync by reducing number of database calls. ([\#17684](https://github.com/element-hq/synapse/issues/17684)) +- Speed up sync by pulling out fewer events from the database. ([\#17688](https://github.com/element-hq/synapse/issues/17688)) + + + +### Updates to locked dependencies + +* Bump authlib from 1.3.1 to 1.3.2. ([\#17679](https://github.com/element-hq/synapse/issues/17679)) +* Bump idna from 3.7 to 3.8. ([\#17682](https://github.com/element-hq/synapse/issues/17682)) +* Bump ruff from 0.6.2 to 0.6.4. ([\#17680](https://github.com/element-hq/synapse/issues/17680)) +* Bump towncrier from 24.7.1 to 24.8.0. ([\#17645](https://github.com/element-hq/synapse/issues/17645)) +* Bump twisted from 24.7.0rc1 to 24.7.0. ([\#17647](https://github.com/element-hq/synapse/issues/17647)) +* Bump types-pillow from 10.2.0.20240520 to 10.2.0.20240822. ([\#17644](https://github.com/element-hq/synapse/issues/17644)) +* Bump types-psycopg2 from 2.9.21.20240417 to 2.9.21.20240819. ([\#17646](https://github.com/element-hq/synapse/issues/17646)) +* Bump types-setuptools from 71.1.0.20240818 to 74.1.0.20240907. ([\#17681](https://github.com/element-hq/synapse/issues/17681)) + +# Synapse 1.114.0 (2024-09-02) + +This release enables support for +[MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) — +Simplified Sliding Sync. This allows using the upcoming releases of the Element +X mobile apps without having to run a Sliding Sync Proxy. + + +### Features + +- Enable native sliding sync support ([MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) and [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186)) by default. ([\#17648](https://github.com/element-hq/synapse/issues/17648)) + + + + +# Synapse 1.114.0rc3 (2024-08-30) + +### Bugfixes + +- Fix regression in v1.114.0rc2 that caused workers to fail to start. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) + + + + +# Synapse 1.114.0rc2 (2024-08-30) + +### Features + +- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509)) +- Make `hash_password` script accept password input from stdin. ([\#17608](https://github.com/element-hq/synapse/issues/17608)) + +### Bugfixes + +- Fix hierarchy returning 403 when room is accessible through federation. Contributed by Krishan (@kfiven). ([\#17194](https://github.com/element-hq/synapse/issues/17194)) +- Fix content-length on federation `/thumbnail` responses. ([\#17532](https://github.com/element-hq/synapse/issues/17532)) +- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17543](https://github.com/element-hq/synapse/issues/17543)) + +### Internal Changes + +- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) +- Refactor sliding sync class into multiple files. ([\#17595](https://github.com/element-hq/synapse/issues/17595)) +- Store sliding sync per-connection state in the database. 
([\#17599](https://github.com/element-hq/synapse/issues/17599)) +- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600)) +- Add support to `@tag_args` for standalone functions. ([\#17604](https://github.com/element-hq/synapse/issues/17604)) +- Speed up incremental syncs in sliding sync by adding some more caching. ([\#17606](https://github.com/element-hq/synapse/issues/17606)) +- Always return the user's own read receipts in sliding sync. ([\#17617](https://github.com/element-hq/synapse/issues/17617)) +- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620)) +- Refactor sliding sync code to move room list logic out into a separate class. ([\#17622](https://github.com/element-hq/synapse/issues/17622)) + + + +### Updates to locked dependencies + +* Bump attrs from 23.2.0 to 24.2.0. ([\#17609](https://github.com/element-hq/synapse/issues/17609)) +* Bump cryptography from 42.0.8 to 43.0.0. ([\#17584](https://github.com/element-hq/synapse/issues/17584)) +* Bump phonenumbers from 8.13.43 to 8.13.44. ([\#17610](https://github.com/element-hq/synapse/issues/17610)) +* Bump pygithub from 2.3.0 to 2.4.0. ([\#17612](https://github.com/element-hq/synapse/issues/17612)) +* Bump pyyaml from 6.0.1 to 6.0.2. ([\#17611](https://github.com/element-hq/synapse/issues/17611)) +* Bump sentry-sdk from 2.12.0 to 2.13.0. ([\#17585](https://github.com/element-hq/synapse/issues/17585)) +* Bump serde from 1.0.206 to 1.0.208. ([\#17581](https://github.com/element-hq/synapse/issues/17581)) +* Bump serde from 1.0.208 to 1.0.209. ([\#17613](https://github.com/element-hq/synapse/issues/17613)) +* Bump serde_json from 1.0.124 to 1.0.125. ([\#17582](https://github.com/element-hq/synapse/issues/17582)) +* Bump serde_json from 1.0.125 to 1.0.127. ([\#17614](https://github.com/element-hq/synapse/issues/17614)) +* Bump types-jsonschema from 4.23.0.20240712 to 4.23.0.20240813. ([\#17583](https://github.com/element-hq/synapse/issues/17583)) +* Bump types-setuptools from 71.1.0.20240726 to 71.1.0.20240818. ([\#17586](https://github.com/element-hq/synapse/issues/17586)) + +# Synapse 1.114.0rc1 (2024-08-20) + +### Features + +- Add a flag to `/versions`, `org.matrix.simplified_msc3575`, to indicate whether experimental sliding sync support has been enabled. ([\#17571](https://github.com/element-hq/synapse/issues/17571)) +- Handle changes in `timeline_limit` in experimental sliding sync. ([\#17579](https://github.com/element-hq/synapse/issues/17579)) +- Correctly track read receipts that should be sent down in experimental sliding sync. ([\#17575](https://github.com/element-hq/synapse/issues/17575), [\#17589](https://github.com/element-hq/synapse/issues/17589), [\#17592](https://github.com/element-hq/synapse/issues/17592)) + +### Bugfixes + +- Start handlers for new media endpoints when media resource configured. ([\#17483](https://github.com/element-hq/synapse/issues/17483)) +- Fix timeline ordering (using `stream_ordering` instead of topological ordering) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17510](https://github.com/element-hq/synapse/issues/17510)) +- Fix experimental sliding sync implementation to remember any updates in rooms that were not sent down immediately. 
([\#17535](https://github.com/element-hq/synapse/issues/17535)) +- Better exclude partially stated rooms if we must await full state in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17538](https://github.com/element-hq/synapse/issues/17538)) +- Handle lower-case http headers in `_Mulitpart_Parser_Protocol`. ([\#17545](https://github.com/element-hq/synapse/issues/17545)) +- Fix fetching federation signing keys from servers that omit `old_verify_keys`. Contributed by @tulir @ Beeper. ([\#17568](https://github.com/element-hq/synapse/issues/17568)) +- Fix bug where we would respond with an error when a remote server asked for media that had a length of 0, using the new multipart federation media endpoint. ([\#17570](https://github.com/element-hq/synapse/issues/17570)) + +### Improved Documentation + +- Clarify default behaviour of the + [`auto_accept_invites.worker_to_run_on`](https://element-hq.github.io/synapse/develop/usage/configuration/config_documentation.html#auto-accept-invites) + option. ([\#17515](https://github.com/element-hq/synapse/issues/17515)) +- Improve docstrings for profile methods. ([\#17559](https://github.com/element-hq/synapse/issues/17559)) + +### Internal Changes + +- Add more tracing to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17514](https://github.com/element-hq/synapse/issues/17514)) +- Fixup comment in sliding sync implementation. ([\#17531](https://github.com/element-hq/synapse/issues/17531)) +- Replace override of deprecated method `HTTPAdapter.get_connection` with `get_connection_with_tls_context`. ([\#17536](https://github.com/element-hq/synapse/issues/17536)) +- Fix performance of device lists in `/key/changes` and sliding sync. ([\#17537](https://github.com/element-hq/synapse/issues/17537), [\#17548](https://github.com/element-hq/synapse/issues/17548)) +- Bump setuptools from 67.6.0 to 72.1.0. ([\#17542](https://github.com/element-hq/synapse/issues/17542)) +- Add a utility function for generating random event IDs. ([\#17557](https://github.com/element-hq/synapse/issues/17557)) +- Speed up responding to media requests. ([\#17558](https://github.com/element-hq/synapse/issues/17558), [\#17561](https://github.com/element-hq/synapse/issues/17561), [\#17564](https://github.com/element-hq/synapse/issues/17564), [\#17566](https://github.com/element-hq/synapse/issues/17566), [\#17567](https://github.com/element-hq/synapse/issues/17567), [\#17569](https://github.com/element-hq/synapse/issues/17569)) +- Test github token before running release script steps. ([\#17562](https://github.com/element-hq/synapse/issues/17562)) +- Reduce log spam of multipart files. ([\#17563](https://github.com/element-hq/synapse/issues/17563)) +- Refactor per-connection state in experimental sliding sync handler. ([\#17574](https://github.com/element-hq/synapse/issues/17574)) +- Add histogram metrics for sliding sync processing time. ([\#17593](https://github.com/element-hq/synapse/issues/17593)) + + + +### Updates to locked dependencies + +* Bump bytes from 1.6.1 to 1.7.1. ([\#17526](https://github.com/element-hq/synapse/issues/17526)) +* Bump lxml from 5.2.2 to 5.3.0. ([\#17550](https://github.com/element-hq/synapse/issues/17550)) +* Bump phonenumbers from 8.13.42 to 8.13.43. ([\#17551](https://github.com/element-hq/synapse/issues/17551)) +* Bump regex from 1.10.5 to 1.10.6. 
([\#17527](https://github.com/element-hq/synapse/issues/17527)) +* Bump sentry-sdk from 2.10.0 to 2.12.0. ([\#17553](https://github.com/element-hq/synapse/issues/17553)) +* Bump serde from 1.0.204 to 1.0.206. ([\#17556](https://github.com/element-hq/synapse/issues/17556)) +* Bump serde_json from 1.0.122 to 1.0.124. ([\#17555](https://github.com/element-hq/synapse/issues/17555)) +* Bump sigstore/cosign-installer from 3.5.0 to 3.6.0. ([\#17549](https://github.com/element-hq/synapse/issues/17549)) +* Bump types-pyyaml from 6.0.12.20240311 to 6.0.12.20240808. ([\#17552](https://github.com/element-hq/synapse/issues/17552)) +* Bump types-requests from 2.31.0.20240406 to 2.32.0.20240712. ([\#17524](https://github.com/element-hq/synapse/issues/17524)) + +# Synapse 1.113.0 (2024-08-13) + +No significant changes since 1.113.0rc1. + + + + +# Synapse 1.113.0rc1 (2024-08-06) + +### Features + +- Track which rooms have been sent to clients in the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17447](https://github.com/element-hq/synapse/issues/17447)) +- Add Account Data extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17477](https://github.com/element-hq/synapse/issues/17477)) +- Add receipts extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17489](https://github.com/element-hq/synapse/issues/17489)) +- Add typing notification extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17505](https://github.com/element-hq/synapse/issues/17505)) + +### Bugfixes + +- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to handle invite/knock rooms when filtering. ([\#17450](https://github.com/element-hq/synapse/issues/17450)) +- Fix a bug introduced in v1.110.0 which caused `/keys/query` to return incomplete results, leading to high network activity and CPU usage on Matrix clients. ([\#17499](https://github.com/element-hq/synapse/issues/17499)) + +### Improved Documentation + +- Update the [`allowed_local_3pids`](https://element-hq.github.io/synapse/v1.112/usage/configuration/config_documentation.html#allowed_local_3pids) config option's msisdn address to a working example. ([\#17476](https://github.com/element-hq/synapse/issues/17476)) + +### Internal Changes + +- Change sliding sync to use their own token format in preparation for storing per-connection state. ([\#17452](https://github.com/element-hq/synapse/issues/17452)) +- Ensure we don't send down negative `bump_stamp` in experimental sliding sync endpoint. ([\#17478](https://github.com/element-hq/synapse/issues/17478)) +- Do not send down empty room entries down experimental sliding sync endpoint. ([\#17479](https://github.com/element-hq/synapse/issues/17479)) +- Refactor Sliding Sync tests to better utilize the `SlidingSyncBase`. ([\#17481](https://github.com/element-hq/synapse/issues/17481), [\#17482](https://github.com/element-hq/synapse/issues/17482)) +- Add some opentracing tags and logging to the experimental sliding sync implementation. ([\#17501](https://github.com/element-hq/synapse/issues/17501)) +- Split and move Sliding Sync tests so we have some more sane test file sizes. 
([\#17504](https://github.com/element-hq/synapse/issues/17504)) +- Update the `limited` field description in the Sliding Sync response to accurately describe what it represents. ([\#17507](https://github.com/element-hq/synapse/issues/17507)) +- Make `timeline` assertions in Sliding Sync tests easier to understand. ([\#17511](https://github.com/element-hq/synapse/issues/17511)) +- Reset the sliding sync connection if we don't recognize the per-connection state position. ([\#17529](https://github.com/element-hq/synapse/issues/17529)) + + + +### Updates to locked dependencies + +* Bump bcrypt from 4.1.3 to 4.2.0. ([\#17495](https://github.com/element-hq/synapse/issues/17495)) +* Bump black from 24.4.2 to 24.8.0. ([\#17522](https://github.com/element-hq/synapse/issues/17522)) +* Bump phonenumbers from 8.13.39 to 8.13.42. ([\#17521](https://github.com/element-hq/synapse/issues/17521)) +* Bump ruff from 0.5.4 to 0.5.5. ([\#17494](https://github.com/element-hq/synapse/issues/17494)) +* Bump serde_json from 1.0.120 to 1.0.121. ([\#17493](https://github.com/element-hq/synapse/issues/17493)) +* Bump serde_json from 1.0.121 to 1.0.122. ([\#17525](https://github.com/element-hq/synapse/issues/17525)) +* Bump towncrier from 23.11.0 to 24.7.1. ([\#17523](https://github.com/element-hq/synapse/issues/17523)) +* Bump types-pyopenssl from 24.1.0.20240425 to 24.1.0.20240722. ([\#17496](https://github.com/element-hq/synapse/issues/17496)) +* Bump types-setuptools from 70.1.0.20240627 to 71.1.0.20240726. ([\#17497](https://github.com/element-hq/synapse/issues/17497)) + +# Synapse 1.112.0 (2024-07-30) + +This security release updates our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). + +Note that this security fix is also available as **Synapse 1.111.1**, which does not include the rest of the changes in Synapse 1.112.0. + +This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request. +If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality. + +With that said, despite being a high-severity issue, **we consider it unlikely that Synapse installations will be affected**. +The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and HAProxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration. + +Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today. + +**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. 
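+
+Concretely, a pip-based upgrade therefore takes two explicit steps. The following is a minimal sketch, assuming the commands are run inside the virtual environment that hosts Synapse:
+
+```sh
+# Upgrading Synapse alone will not pull in the patched Twisted,
+# because 24.7.0rc1 is only a release candidate at this time.
+pip install -U matrix-synapse
+# Explicitly install the patched Twisted release candidate.
+pip install Twisted==24.7.0rc1
+```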
+ +### Internal Changes + +- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502)) + + +# Synapse 1.112.0rc1 (2024-07-23) + +Please note that this release candidate does not include the security dependency update +included in version 1.111.1, as this release candidate was published before 1.111.1. +The same security fix can be found in the full release of 1.112.0. + +### Features + +- Add to-device extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17416](https://github.com/element-hq/synapse/issues/17416)) +- Populate `name`/`avatar` fields in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17418](https://github.com/element-hq/synapse/issues/17418)) +- Populate `heroes` and room summary fields (`joined_count`, `invited_count`) in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17419](https://github.com/element-hq/synapse/issues/17419)) +- Populate `is_dm` room field in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17429](https://github.com/element-hq/synapse/issues/17429)) +- Add room subscriptions to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17432](https://github.com/element-hq/synapse/issues/17432)) +- Prepare for authenticated media freeze. ([\#17433](https://github.com/element-hq/synapse/issues/17433)) +- Add E2EE extension support to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17454](https://github.com/element-hq/synapse/issues/17454)) + +### Bugfixes + +- Add a configurable option to always include offline users in presence sync results. Contributed by @Michael-Hollister. ([\#17231](https://github.com/element-hq/synapse/issues/17231)) +- Fix a bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using room type filters and the user has one or more remote invites. ([\#17434](https://github.com/element-hq/synapse/issues/17434)) +- Order `heroes` by `stream_ordering` as the Matrix specification states (applies to `/sync`). ([\#17435](https://github.com/element-hq/synapse/issues/17435)) +- Fix a rare bug where `/sync` would break for a user when using workers with multiple stream writers. ([\#17438](https://github.com/element-hq/synapse/issues/17438)) + +### Improved Documentation + +- Update the README image to have a white background, so that it is readable in dark mode. ([\#17387](https://github.com/element-hq/synapse/issues/17387)) +- Add Red Hat Enterprise Linux and Rocky Linux 8 and 9 installation instructions. ([\#17423](https://github.com/element-hq/synapse/issues/17423)) +- Improve documentation for the [`default_power_level_content_override`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#default_power_level_content_override) config option. ([\#17451](https://github.com/element-hq/synapse/issues/17451)) + +### Internal Changes + +- Make sure we always use the right logic for enabling the media repo. ([\#17424](https://github.com/element-hq/synapse/issues/17424)) +- Fix argument documentation for the `RateLimiter.record_action` method. 
([\#17426](https://github.com/element-hq/synapse/issues/17426)) +- Reduce volume of 'Waiting for current token' logs, which were introduced in v1.109.0. ([\#17428](https://github.com/element-hq/synapse/issues/17428)) +- Limit concurrent remote downloads to 6 per IP address, and decrement remote downloads without a content-length from the ratelimiter after the download is complete. ([\#17439](https://github.com/element-hq/synapse/issues/17439)) +- Remove an unnecessary call to resume producing in the fake channel. ([\#17449](https://github.com/element-hq/synapse/issues/17449)) +- Update experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint to bump the room when it is created. ([\#17453](https://github.com/element-hq/synapse/issues/17453)) +- Speed up generating sliding sync responses. ([\#17458](https://github.com/element-hq/synapse/issues/17458)) +- Add cache to `get_rooms_for_local_user_where_membership_is` to speed up sliding sync. ([\#17460](https://github.com/element-hq/synapse/issues/17460)) +- Speed up fetching room keys from backup. ([\#17461](https://github.com/element-hq/synapse/issues/17461)) +- Speed up sorting of the room list in sliding sync. ([\#17468](https://github.com/element-hq/synapse/issues/17468)) +- Implement handling of `$ME` as a state key in sliding sync. ([\#17469](https://github.com/element-hq/synapse/issues/17469)) + + + +### Updates to locked dependencies + +* Bump bytes from 1.6.0 to 1.6.1. ([\#17441](https://github.com/element-hq/synapse/issues/17441)) +* Bump hiredis from 2.3.2 to 3.0.0. ([\#17464](https://github.com/element-hq/synapse/issues/17464)) +* Bump jsonschema from 4.22.0 to 4.23.0. ([\#17444](https://github.com/element-hq/synapse/issues/17444)) +* Bump matrix-org/done-action from 2 to 3. ([\#17440](https://github.com/element-hq/synapse/issues/17440)) +* Bump mypy from 1.9.0 to 1.10.1. ([\#17445](https://github.com/element-hq/synapse/issues/17445)) +* Bump pyopenssl from 24.1.0 to 24.2.1. ([\#17465](https://github.com/element-hq/synapse/issues/17465)) +* Bump ruff from 0.5.0 to 0.5.4. ([\#17466](https://github.com/element-hq/synapse/issues/17466)) +* Bump sentry-sdk from 2.6.0 to 2.8.0. ([\#17456](https://github.com/element-hq/synapse/issues/17456)) +* Bump sentry-sdk from 2.8.0 to 2.10.0. ([\#17467](https://github.com/element-hq/synapse/issues/17467)) +* Bump setuptools from 67.6.0 to 70.0.0. ([\#17448](https://github.com/element-hq/synapse/issues/17448)) +* Bump twine from 5.1.0 to 5.1.1. ([\#17443](https://github.com/element-hq/synapse/issues/17443)) +* Bump types-jsonschema from 4.22.0.20240610 to 4.23.0.20240712. ([\#17446](https://github.com/element-hq/synapse/issues/17446)) +* Bump ulid from 1.1.2 to 1.1.3. ([\#17442](https://github.com/element-hq/synapse/issues/17442)) +* Bump zipp from 3.15.0 to 3.19.1. ([\#17427](https://github.com/element-hq/synapse/issues/17427)) + + +# Synapse 1.111.1 (2024-07-30) + +This security release updates our locked dependency on Twisted to 24.7.0rc1, which includes a security fix for [CVE-2024-41671 / GHSA-c8m8-j448-xjx7: Disordered HTTP pipeline response in twisted.web, again](https://github.com/twisted/twisted/security/advisories/GHSA-c8m8-j448-xjx7). + +This issue means that, if multiple HTTP requests are pipelined in the same TCP connection, Synapse can send responses to the wrong HTTP request. +If a reverse proxy was configured to use HTTP pipelining, this could result in responses being sent to the wrong user, severely harming confidentiality. 
+ +With that said, despite being a high-severity issue, **we consider it unlikely that Synapse installations will be affected**. +The use of HTTP pipelining in this fashion would cause worse performance for clients (request-response latencies would be increased as users' responses would be artificially blocked behind other users' slow requests). Further, Nginx and HAProxy, two common reverse proxies, do not appear to support configuring their upstreams to use HTTP pipelining and thus would not be affected. For both of these reasons, we consider it unlikely that a Synapse deployment would be set up in such a configuration. + +Despite that, we cannot rule out that some installations may exist with this unusual setup and so we are releasing this security update today. + +**pip users:** Note that by default, upgrading Synapse using pip will not automatically upgrade Twisted. **Please manually install the new version of Twisted** using `pip install Twisted==24.7.0rc1`. Note also that even the `--upgrade-strategy=eager` flag to `pip install -U matrix-synapse` will not upgrade Twisted to a patched version because it is only a release candidate at this time. + + +### Internal Changes + +- Upgrade locked dependency on Twisted to 24.7.0rc1. ([\#17502](https://github.com/element-hq/synapse/issues/17502)) + + +# Synapse 1.111.0 (2024-07-16) + +No significant changes since 1.111.0rc2. + + + + +# Synapse 1.111.0rc2 (2024-07-10) + +### Bugfixes + +- Fix a bug where using the `synapse.app.media_repository` worker configuration would break the new media endpoints. ([\#17420](https://github.com/element-hq/synapse/issues/17420)) + +### Improved Documentation + +- Document the new federation media worker endpoints in the [upgrade notes](https://element-hq.github.io/synapse/v1.111/upgrade.html) and [worker docs](https://element-hq.github.io/synapse/v1.111/workers.html). ([\#17421](https://github.com/element-hq/synapse/issues/17421)) + +### Internal Changes + +- Route authenticated federation media requests to media repository workers in Complement tests. ([\#17422](https://github.com/element-hq/synapse/issues/17422)) + + + + +# Synapse 1.111.0rc1 (2024-07-09) + +### Features + +- Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17320](https://github.com/element-hq/synapse/issues/17320)) +- Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17337](https://github.com/element-hq/synapse/issues/17337)) +- Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17342](https://github.com/element-hq/synapse/issues/17342)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding [`_matrix/client/v1/media/download`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediadownloadservernamemediaid) endpoint. 
([\#17365](https://github.com/element-hq/synapse/issues/17365)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) + by adding [`_matrix/client/v1/media/thumbnail`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediathumbnailservernamemediaid), [`_matrix/federation/v1/media/thumbnail`](https://spec.matrix.org/v1.11/server-server-api/#get_matrixfederationv1mediathumbnailmediaid) endpoints and stabilizing the + remaining [`_matrix/client/v1/media`](https://spec.matrix.org/v1.11/client-server-api/#get_matrixclientv1mediaconfig) endpoints. ([\#17388](https://github.com/element-hq/synapse/issues/17388)) +- Add `rooms.bump_stamp` for easier client-side sorting in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17395](https://github.com/element-hq/synapse/issues/17395)) +- Forget all of a user's rooms upon deactivation, preventing local room purges from being blocked on deactivated users. ([\#17400](https://github.com/element-hq/synapse/issues/17400)) +- Declare support for [Matrix 1.11](https://matrix.org/blog/2024/06/20/matrix-v1.11-release/). ([\#17403](https://github.com/element-hq/synapse/issues/17403)) +- [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861): allow overriding the introspection endpoint. ([\#17406](https://github.com/element-hq/synapse/issues/17406)) + +### Bugfixes + +- Fix a rare race which caused no new to-device messages to be received from a remote server. ([\#17362](https://github.com/element-hq/synapse/issues/17362)) +- Fix a bug in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint when using an old database. ([\#17398](https://github.com/element-hq/synapse/issues/17398)) + +### Improved Documentation + +- Clarify that `url_preview_url_blacklist` is a usability feature. ([\#17356](https://github.com/element-hq/synapse/issues/17356)) +- Fix broken links in the README. ([\#17379](https://github.com/element-hq/synapse/issues/17379)) +- Clarify that changelog content *and file extension* need to match in order for entries to merge. ([\#17399](https://github.com/element-hq/synapse/issues/17399)) + +### Internal Changes + +- Make the release script create a release branch for Complement as well. ([\#17318](https://github.com/element-hq/synapse/issues/17318)) +- Fix uploading packages to PyPI. ([\#17363](https://github.com/element-hq/synapse/issues/17363)) +- Add a CI check for the README. ([\#17367](https://github.com/element-hq/synapse/issues/17367)) +- Fix linting errors from the new `ruff` version. ([\#17381](https://github.com/element-hq/synapse/issues/17381), [\#17411](https://github.com/element-hq/synapse/issues/17411)) +- Fix building Debian packages on non-clean checkouts. ([\#17390](https://github.com/element-hq/synapse/issues/17390)) +- Finish up work to allow per-user feature flags. ([\#17392](https://github.com/element-hq/synapse/issues/17392), [\#17410](https://github.com/element-hq/synapse/issues/17410)) +- Allow enabling sliding sync per-user. ([\#17393](https://github.com/element-hq/synapse/issues/17393)) + + + +### Updates to locked dependencies + +* Bump certifi from 2023.7.22 to 2024.7.4. ([\#17404](https://github.com/element-hq/synapse/issues/17404)) +* Bump cryptography from 42.0.7 to 42.0.8. ([\#17382](https://github.com/element-hq/synapse/issues/17382)) +* Bump ijson from 3.2.3 to 3.3.0. 
([\#17413](https://github.com/element-hq/synapse/issues/17413)) +* Bump log from 0.4.21 to 0.4.22. ([\#17384](https://github.com/element-hq/synapse/issues/17384)) +* Bump mypy-zope from 1.0.4 to 1.0.5. ([\#17414](https://github.com/element-hq/synapse/issues/17414)) +* Bump pillow from 10.3.0 to 10.4.0. ([\#17412](https://github.com/element-hq/synapse/issues/17412)) +* Bump pydantic from 2.7.1 to 2.8.2. ([\#17415](https://github.com/element-hq/synapse/issues/17415)) +* Bump ruff from 0.3.7 to 0.5.0. ([\#17381](https://github.com/element-hq/synapse/issues/17381)) +* Bump serde from 1.0.203 to 1.0.204. ([\#17409](https://github.com/element-hq/synapse/issues/17409)) +* Bump serde_json from 1.0.117 to 1.0.120. ([\#17385](https://github.com/element-hq/synapse/issues/17385), [\#17408](https://github.com/element-hq/synapse/issues/17408)) +* Bump types-setuptools from 69.5.0.20240423 to 70.1.0.20240627. ([\#17380](https://github.com/element-hq/synapse/issues/17380)) + +# Synapse 1.110.0 (2024-07-03) + +No significant changes since 1.110.0rc3. + + + + +# Synapse 1.110.0rc3 (2024-07-02) + +### Bugfixes + +- Fix a bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391)) + +### Internal Changes + +- Limit the size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371)) +- Fix building the Debian package for Debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389)) + + + + +# Synapse 1.110.0rc2 (2024-06-26) + +### Internal Changes + +- Fix uploading packages to PyPI. ([\#17363](https://github.com/element-hq/synapse/issues/17363)) + + + + +# Synapse 1.110.0rc1 (2024-06-26) + +### Features + +- Add an initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17187](https://github.com/element-hq/synapse/issues/17187)) +- Add experimental support for [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823) - Account suspension. ([\#17255](https://github.com/element-hq/synapse/issues/17255)) +- Improve ratelimiting in Synapse. ([\#17256](https://github.com/element-hq/synapse/issues/17256)) +- Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. ([\#17270](https://github.com/element-hq/synapse/issues/17270), [\#17296](https://github.com/element-hq/synapse/issues/17296)) +- Add filtering for public and empty rooms to the Admin API [List Room API](https://element-hq.github.io/synapse/latest/admin_api/rooms.html#list-room-api). ([\#17276](https://github.com/element-hq/synapse/issues/17276)) +- Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17277](https://github.com/element-hq/synapse/issues/17277)) +- Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17281](https://github.com/element-hq/synapse/issues/17281)) +- Include user membership in events served to clients, per [MSC4115](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). 
([\#17282](https://github.com/element-hq/synapse/issues/17282)) +- Do not require user-interactive authentication for uploading cross-signing keys for the first time, per [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967). ([\#17284](https://github.com/element-hq/synapse/issues/17284)) +- Add `stream_ordering` sort to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17293](https://github.com/element-hq/synapse/issues/17293)) +- `register_new_matrix_user` now supports a `--password-file` flag, which + is useful for scripting. ([\#17294](https://github.com/element-hq/synapse/issues/17294)) +- `register_new_matrix_user` now supports an `--exists-ok` flag to allow registration of users that already exist in the database. + This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304)) +- Add support for the `via` query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322)) +- Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation `/download` endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350)) + +### Bugfixes + +- Fix searching for users with their exact localpart whose ID includes a hyphen. ([\#17254](https://github.com/element-hq/synapse/issues/17254)) +- Fix the wrong retention policy being used when filtering events. ([\#17272](https://github.com/element-hq/synapse/issues/17272)) +- Fix a bug where OTKs were not always included in `/sync` response when using workers. ([\#17275](https://github.com/element-hq/synapse/issues/17275)) +- Fix a long-standing bug where an invalid 'from' parameter to [`/notifications`](https://spec.matrix.org/v1.10/client-server-api/#get_matrixclientv3notifications) would result in an Internal Server Error. ([\#17283](https://github.com/element-hq/synapse/issues/17283)) +- Fix an edge case in `/sync` returning the wrong state when using sharded event persisters. ([\#17295](https://github.com/element-hq/synapse/issues/17295)) +- Add an initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17301](https://github.com/element-hq/synapse/issues/17301)) +- Fix the email notification subject when invited to a space. ([\#17336](https://github.com/element-hq/synapse/issues/17336)) + +### Improved Documentation + +- Add missing quotes to the example for `exclude_rooms_from_sync`. ([\#17308](https://github.com/element-hq/synapse/issues/17308)) +- Update header in the README to visually fix the auto-generated table of contents. ([\#17329](https://github.com/element-hq/synapse/issues/17329)) +- Fix stale references to the Foundation's Security Disclosure Policy. ([\#17341](https://github.com/element-hq/synapse/issues/17341)) +- Add default values for `rc_invites.per_issuer` to the docs. ([\#17347](https://github.com/element-hq/synapse/issues/17347)) +- Fix an error in the docs for the `search_all_users` parameter under `user_directory`. 
([\#17348](https://github.com/element-hq/synapse/issues/17348)) + +### Internal Changes + +- Remove the unused `expire_access_token` option in the Synapse Docker config file. Contributed by @AaronDewes. ([\#17198](https://github.com/element-hq/synapse/issues/17198)) +- Use fully-qualified `PersistedEventPosition` when returning `RoomsForUser` to facilitate proper comparisons and `RoomStreamToken` generation. ([\#17265](https://github.com/element-hq/synapse/issues/17265)) +- Add debug logging for when room keys are uploaded, including whether they are replacing other room keys. ([\#17266](https://github.com/element-hq/synapse/issues/17266)) +- Handle OTK uploads off master. ([\#17271](https://github.com/element-hq/synapse/issues/17271)) +- Don't try to resync devices for remote users whose servers are marked as down. ([\#17273](https://github.com/element-hq/synapse/issues/17273)) +- Re-organize Pydantic models and types used in handlers. ([\#17279](https://github.com/element-hq/synapse/issues/17279)) +- Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. ([\#17300](https://github.com/element-hq/synapse/issues/17300)) +- Update the README with Element branding, improve headers, and fix the #synapse:matrix.org support room link rendering. ([\#17324](https://github.com/element-hq/synapse/issues/17324)) +- Change the path of the experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync implementation to `/org.matrix.simplified_msc3575/sync` since our simplified API is slightly incompatible with what's in the current MSC. ([\#17331](https://github.com/element-hq/synapse/issues/17331)) +- Handle device list notifications for large accounts more efficiently in worker mode. ([\#17333](https://github.com/element-hq/synapse/issues/17333), [\#17358](https://github.com/element-hq/synapse/issues/17358)) +- Do not block event sending/receiving while calculating large event auth chains. ([\#17338](https://github.com/element-hq/synapse/issues/17338)) +- Tidy up `parse_integer` docs and call sites to reflect the fact that they require non-negative integers by default, and bring the `parse_integer_from_args` default into alignment. Contributed by Denis Kasak (@dkasak). ([\#17339](https://github.com/element-hq/synapse/issues/17339)) + + + +### Updates to locked dependencies + +* Bump authlib from 1.3.0 to 1.3.1. ([\#17343](https://github.com/element-hq/synapse/issues/17343)) +* Bump dawidd6/action-download-artifact from 3.1.4 to 5. ([\#17289](https://github.com/element-hq/synapse/issues/17289)) +* Bump dawidd6/action-download-artifact from 5 to 6. ([\#17313](https://github.com/element-hq/synapse/issues/17313)) +* Bump docker/build-push-action from 5 to 6. ([\#17312](https://github.com/element-hq/synapse/issues/17312)) +* Bump jinja2 from 3.1.3 to 3.1.4. ([\#17287](https://github.com/element-hq/synapse/issues/17287)) +* Bump lazy_static from 1.4.0 to 1.5.0. ([\#17355](https://github.com/element-hq/synapse/issues/17355)) +* Bump msgpack from 1.0.7 to 1.0.8. ([\#17317](https://github.com/element-hq/synapse/issues/17317)) +* Bump netaddr from 1.2.1 to 1.3.0. ([\#17353](https://github.com/element-hq/synapse/issues/17353)) +* Bump packaging from 24.0 to 24.1. ([\#17352](https://github.com/element-hq/synapse/issues/17352)) +* Bump phonenumbers from 8.13.37 to 8.13.39. ([\#17315](https://github.com/element-hq/synapse/issues/17315)) +* Bump regex from 1.10.4 to 1.10.5. 
([\#17290](https://github.com/element-hq/synapse/issues/17290)) +* Bump requests from 2.31.0 to 2.32.2. ([\#17345](https://github.com/element-hq/synapse/issues/17345)) +* Bump sentry-sdk from 2.1.1 to 2.3.1. ([\#17263](https://github.com/element-hq/synapse/issues/17263)) +* Bump sentry-sdk from 2.3.1 to 2.6.0. ([\#17351](https://github.com/element-hq/synapse/issues/17351)) +* Bump tornado from 6.4 to 6.4.1. ([\#17344](https://github.com/element-hq/synapse/issues/17344)) +* Bump mypy from 1.8.0 to 1.9.0. ([\#17297](https://github.com/element-hq/synapse/issues/17297)) +* Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610. ([\#17288](https://github.com/element-hq/synapse/issues/17288)) +* Bump types-netaddr from 1.2.0.20240219 to 1.3.0.20240530. ([\#17314](https://github.com/element-hq/synapse/issues/17314)) +* Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520. ([\#17285](https://github.com/element-hq/synapse/issues/17285)) +* Bump types-pyyaml from 6.0.12.12 to 6.0.12.20240311. ([\#17316](https://github.com/element-hq/synapse/issues/17316)) +* Bump typing-extensions from 4.11.0 to 4.12.2. ([\#17354](https://github.com/element-hq/synapse/issues/17354)) +* Bump urllib3 from 2.0.7 to 2.2.2. ([\#17346](https://github.com/element-hq/synapse/issues/17346)) + +# Synapse 1.109.0 (2024-06-18) + +### Internal Changes + +- Fix the building of binary wheels for macOS by switching to macOS 12 CI runners. ([\#17319](https://github.com/element-hq/synapse/issues/17319)) + + + + +# Synapse 1.109.0rc3 (2024-06-17) + +### Bugfixes + +- When rolling back to a previous Synapse version and then forwards again to this release, don't require server operators to manually run SQL. ([\#17305](https://github.com/element-hq/synapse/issues/17305), [\#17309](https://github.com/element-hq/synapse/issues/17309)) + +### Internal Changes + +- Use the release branch for sytest in release-branch PRs. ([\#17306](https://github.com/element-hq/synapse/issues/17306)) + + + + +# Synapse 1.109.0rc2 (2024-06-11) + +### Bugfixes + +- Fix a bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275)) +- Fix a bug where `/sync` could get stuck due to an edge case in device list handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292)) + + + + +# Synapse 1.109.0rc1 (2024-06-04) + +### Features + +- Add the ability to auto-accept invites on behalf of users. See the [`auto_accept_invites`](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#auto-accept-invites) config option for details. ([\#17147](https://github.com/element-hq/synapse/issues/17147)) +- Add experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync/e2ee` endpoint for to-device messages and device encryption info. ([\#17167](https://github.com/element-hq/synapse/issues/17167)) +- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/issues/3916) by adding unstable media endpoints to `/_matrix/client`. ([\#17213](https://github.com/element-hq/synapse/issues/17213)) +- Add logging to tasks managed by the task scheduler, showing CPU and database usage. ([\#17219](https://github.com/element-hq/synapse/issues/17219)) + +### Bugfixes + +- Fix deduplication of membership events so that it does not create unused state groups. 
([\#17164](https://github.com/element-hq/synapse/issues/17164)) +- Fix a bug where duplicate events could be sent down sync when workers were overloaded. ([\#17215](https://github.com/element-hq/synapse/issues/17215)) +- Ignore attempts to send to-device messages to bad users, to avoid log spam when we try to connect to the bad server. ([\#17240](https://github.com/element-hq/synapse/issues/17240)) +- Fix handling of duplicate concurrent uploading of device one-time-keys. ([\#17241](https://github.com/element-hq/synapse/issues/17241)) +- Fix reporting of default tags to Sentry, such as the worker name. Broke in v1.108.0. ([\#17251](https://github.com/element-hq/synapse/issues/17251)) +- Fix a bug where typing updates would not be sent when using workers after a restart. ([\#17252](https://github.com/element-hq/synapse/issues/17252)) + +### Improved Documentation + +- Update the LemonLDAP documentation to say that claims should be explicitly included in the returned `id_token`, as Synapse won't request them. ([\#17204](https://github.com/element-hq/synapse/issues/17204)) + +### Internal Changes + +- Improve DB usage when fetching related events. ([\#17083](https://github.com/element-hq/synapse/issues/17083)) +- Log exceptions when failing to auto-join a new user according to the `auto_join_rooms` option. ([\#17176](https://github.com/element-hq/synapse/issues/17176)) +- Reduce the work of calculating outbound device list updates. ([\#17211](https://github.com/element-hq/synapse/issues/17211)) +- Improve performance of calculating device list changes in `/sync`. ([\#17216](https://github.com/element-hq/synapse/issues/17216)) +- Move towards using `MultiWriterIdGenerator` everywhere. ([\#17226](https://github.com/element-hq/synapse/issues/17226)) +- Replace all usages of `StreamIdGenerator` with `MultiWriterIdGenerator`. ([\#17229](https://github.com/element-hq/synapse/issues/17229)) +- Change the `allow_unsafe_locale` config option to also apply when setting up new databases. ([\#17238](https://github.com/element-hq/synapse/issues/17238)) +- Fix errors in logs about closing incorrect logging contexts when media gets rejected by a module. ([\#17239](https://github.com/element-hq/synapse/issues/17239), [\#17246](https://github.com/element-hq/synapse/issues/17246)) +- Clean out invalid destinations from the `device_federation_outbox` table. ([\#17242](https://github.com/element-hq/synapse/issues/17242)) +- Stop logging errors when receiving invalid user IDs in key query requests. ([\#17250](https://github.com/element-hq/synapse/issues/17250)) + + + +### Updates to locked dependencies + +* Bump anyhow from 1.0.83 to 1.0.86. ([\#17220](https://github.com/element-hq/synapse/issues/17220)) +* Bump bcrypt from 4.1.2 to 4.1.3. ([\#17224](https://github.com/element-hq/synapse/issues/17224)) +* Bump lxml from 5.2.1 to 5.2.2. ([\#17261](https://github.com/element-hq/synapse/issues/17261)) +* Bump mypy-zope from 1.0.3 to 1.0.4. ([\#17262](https://github.com/element-hq/synapse/issues/17262)) +* Bump phonenumbers from 8.13.35 to 8.13.37. ([\#17235](https://github.com/element-hq/synapse/issues/17235)) +* Bump prometheus-client from 0.19.0 to 0.20.0. ([\#17233](https://github.com/element-hq/synapse/issues/17233)) +* Bump pyasn1 from 0.5.1 to 0.6.0. ([\#17223](https://github.com/element-hq/synapse/issues/17223)) +* Bump pyicu from 2.13 to 2.13.1. ([\#17236](https://github.com/element-hq/synapse/issues/17236)) +* Bump pyopenssl from 24.0.0 to 24.1.0. 
([\#17234](https://github.com/element-hq/synapse/issues/17234)) +* Bump serde from 1.0.201 to 1.0.202. ([\#17221](https://github.com/element-hq/synapse/issues/17221)) +* Bump serde from 1.0.202 to 1.0.203. ([\#17232](https://github.com/element-hq/synapse/issues/17232)) +* Bump twine from 5.0.0 to 5.1.0. ([\#17225](https://github.com/element-hq/synapse/issues/17225)) +* Bump types-psycopg2 from 2.9.21.20240311 to 2.9.21.20240417. ([\#17222](https://github.com/element-hq/synapse/issues/17222)) +* Bump types-pyopenssl from 24.0.0.20240311 to 24.1.0.20240425. ([\#17260](https://github.com/element-hq/synapse/issues/17260)) + +# Synapse 1.108.0 (2024-05-28) + +No significant changes since 1.108.0rc1. + + + + +# Synapse 1.108.0rc1 (2024-05-21) + +### Features + +- Add a feature that allows clients to query the configured federation whitelist. Disabled by default. ([\#16848](https://github.com/element-hq/synapse/issues/16848), [\#17199](https://github.com/element-hq/synapse/issues/17199)) +- Add the ability to allow numeric user IDs with a specific prefix when in the CAS flow. Contributed by Aurélien Grimpard. ([\#17098](https://github.com/element-hq/synapse/issues/17098)) + +### Bugfixes + +- Fix a bug where push rules would be empty in `/sync` for some accounts. Introduced in v1.93.0. ([\#17142](https://github.com/element-hq/synapse/issues/17142)) +- Add support for optional whitespace around the Federation API's `Authorization` header's parameter commas. ([\#17145](https://github.com/element-hq/synapse/issues/17145)) +- Fix a bug where disabling room publication prevented public rooms from being created on workers. ([\#17177](https://github.com/element-hq/synapse/issues/17177), [\#17184](https://github.com/element-hq/synapse/issues/17184)) + +### Improved Documentation + +- Document [`/v1/make_knock`](https://spec.matrix.org/v1.10/server-server-api/#get_matrixfederationv1make_knockroomiduserid) and [`/v1/send_knock/`](https://spec.matrix.org/v1.10/server-server-api/#put_matrixfederationv1send_knockroomideventid) federation endpoints as worker-compatible. ([\#17058](https://github.com/element-hq/synapse/issues/17058)) +- Update the User Admin API with a note about prefixing OIDC `external_id` providers. ([\#17139](https://github.com/element-hq/synapse/issues/17139)) +- Clarify the state of the created room when using the `autocreate_auto_join_room_preset` config option. ([\#17150](https://github.com/element-hq/synapse/issues/17150)) +- Update the Admin FAQ with the current libjemalloc version for the latest Debian stable. Additionally update the name of the "push_rules" stream in the Workers documentation. ([\#17171](https://github.com/element-hq/synapse/issues/17171)) + +### Internal Changes + +- Add a note to reflect that [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) is closed but will remain supported for some time. ([\#17151](https://github.com/element-hq/synapse/issues/17151)) +- Update dependency PyO3 to 0.21. ([\#17162](https://github.com/element-hq/synapse/issues/17162)) +- Fix linter errors found in PR #17147. ([\#17166](https://github.com/element-hq/synapse/issues/17166)) +- Bump black from 24.2.0 to 24.4.2. ([\#17170](https://github.com/element-hq/synapse/issues/17170)) +- Cache literal sync filter validation for performance. ([\#17186](https://github.com/element-hq/synapse/issues/17186)) +- Improve performance by fixing a reactor pause. 
([\#17192](https://github.com/element-hq/synapse/issues/17192)) +- Route `/make_knock` and `/send_knock` federation APIs to the federation reader worker in Complement test runs. ([\#17195](https://github.com/element-hq/synapse/issues/17195)) +- Prepare the sync handler to be able to return different sync responses (`SyncVersion`). ([\#17200](https://github.com/element-hq/synapse/issues/17200)) +- Organize the sync cache key parameter outside of the sync config (separate concerns). ([\#17201](https://github.com/element-hq/synapse/issues/17201)) +- Refactor `SyncResultBuilder` assembly to its own function. ([\#17202](https://github.com/element-hq/synapse/issues/17202)) +- Rename to be obvious: `joined_rooms` -> `joined_room_ids`. ([\#17203](https://github.com/element-hq/synapse/issues/17203), [\#17208](https://github.com/element-hq/synapse/issues/17208)) +- Add a short pause when rate-limiting a request. ([\#17210](https://github.com/element-hq/synapse/issues/17210)) + + + +### Updates to locked dependencies + +* Bump cryptography from 42.0.5 to 42.0.7. ([\#17180](https://github.com/element-hq/synapse/issues/17180)) +* Bump gitpython from 3.1.41 to 3.1.43. ([\#17181](https://github.com/element-hq/synapse/issues/17181)) +* Bump immutabledict from 4.1.0 to 4.2.0. ([\#17179](https://github.com/element-hq/synapse/issues/17179)) +* Bump sentry-sdk from 1.40.3 to 2.1.1. ([\#17178](https://github.com/element-hq/synapse/issues/17178)) +* Bump serde from 1.0.200 to 1.0.201. ([\#17183](https://github.com/element-hq/synapse/issues/17183)) +* Bump serde_json from 1.0.116 to 1.0.117. ([\#17182](https://github.com/element-hq/synapse/issues/17182)) + +# Synapse 1.107.0 (2024-05-14) + +No significant changes since 1.107.0rc1. + + +# Synapse 1.107.0rc1 (2024-05-07) + +### Features + +- Add preliminary support for [MSC3823: Account Suspension](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#17051](https://github.com/element-hq/synapse/issues/17051)) +- Declare support for [Matrix v1.10](https://matrix.org/blog/2024/03/22/matrix-v1.10-release/). Contributed by @clokep. ([\#17082](https://github.com/element-hq/synapse/issues/17082)) +- Add support for [MSC4115: membership metadata on events](https://github.com/matrix-org/matrix-spec-proposals/pull/4115). ([\#17104](https://github.com/element-hq/synapse/issues/17104), [\#17137](https://github.com/element-hq/synapse/issues/17137)) + +### Bugfixes + +- Fix the search feature of Element Android on homeservers using SQLite by returning search terms as search highlights. ([\#17000](https://github.com/element-hq/synapse/issues/17000)) +- Fix a bug introduced in v1.52.0 where the `destination` query parameter for the [Destination Rooms Admin API](https://element-hq.github.io/synapse/v1.105/usage/administration/admin_api/federation.html#destination-rooms) failed to actually filter returned rooms. ([\#17077](https://github.com/element-hq/synapse/issues/17077)) +- For MSC3266 room summaries, support queries at the recommended endpoint of `/_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}`. The existing endpoint of `/_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` is deprecated. ([\#17078](https://github.com/element-hq/synapse/issues/17078)) +- Apply the user's email and picture during OIDC registration if present and selected. 
([\#17120](https://github.com/element-hq/synapse/issues/17120)) +- Improve the error message for cross-signing reset with [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) enabled. ([\#17121](https://github.com/element-hq/synapse/issues/17121)) +- Fix a bug which meant that to-device messages received over federation could be dropped when the server was under load, or when networking problems caused issues between Synapse processes or the database. ([\#17127](https://github.com/element-hq/synapse/issues/17127)) +- Fix a bug where `StreamChangeCache` would not respect configured cache factors. ([\#17152](https://github.com/element-hq/synapse/issues/17152)) + +### Updates to the Docker image + +- Correct licensing metadata on the Docker image. ([\#17141](https://github.com/element-hq/synapse/issues/17141)) + +### Improved Documentation + +- Update the `event_cache_size` and `global_factor` configuration options' documentation. ([\#17071](https://github.com/element-hq/synapse/issues/17071)) +- Remove broken Sphinx docs. ([\#17073](https://github.com/element-hq/synapse/issues/17073), [\#17148](https://github.com/element-hq/synapse/issues/17148)) +- Add `RuntimeDirectory` to the example `matrix-synapse.service` systemd unit. ([\#17084](https://github.com/element-hq/synapse/issues/17084)) +- Fix various small typos throughout the docs. ([\#17114](https://github.com/element-hq/synapse/issues/17114)) +- Update the `enable_notifs` configuration documentation. ([\#17116](https://github.com/element-hq/synapse/issues/17116)) +- Update the Upgrade Notes with the latest minimum supported Rust version of 1.66.0. Contributed by @jahway603. ([\#17140](https://github.com/element-hq/synapse/issues/17140)) + +### Internal Changes + +- Enable [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) by default in the Synapse Complement image. ([\#17105](https://github.com/element-hq/synapse/issues/17105)) +- Add optimisation to `StreamChangeCache.get_entities_changed(..)`. ([\#17130](https://github.com/element-hq/synapse/issues/17130)) + + + +### Updates to locked dependencies + +* Bump furo from 2024.1.29 to 2024.4.27. ([\#17133](https://github.com/element-hq/synapse/issues/17133)) +* Bump idna from 3.6 to 3.7. ([\#17136](https://github.com/element-hq/synapse/issues/17136)) +* Bump jsonschema from 4.21.1 to 4.22.0. ([\#17157](https://github.com/element-hq/synapse/issues/17157)) +* Bump lxml from 5.1.0 to 5.2.1. ([\#17158](https://github.com/element-hq/synapse/issues/17158)) +* Bump phonenumbers from 8.13.29 to 8.13.35. ([\#17106](https://github.com/element-hq/synapse/issues/17106)) +* Bump pillow from 10.2.0 to 10.3.0. ([\#17146](https://github.com/element-hq/synapse/issues/17146)) +* Bump pydantic from 2.6.4 to 2.7.0. ([\#17107](https://github.com/element-hq/synapse/issues/17107)) +* Bump pydantic from 2.7.0 to 2.7.1. ([\#17160](https://github.com/element-hq/synapse/issues/17160)) +* Bump pyicu from 2.12 to 2.13. ([\#17109](https://github.com/element-hq/synapse/issues/17109)) +* Bump serde from 1.0.197 to 1.0.198. ([\#17111](https://github.com/element-hq/synapse/issues/17111)) +* Bump serde from 1.0.198 to 1.0.199. ([\#17132](https://github.com/element-hq/synapse/issues/17132)) +* Bump serde from 1.0.199 to 1.0.200. ([\#17161](https://github.com/element-hq/synapse/issues/17161)) +* Bump serde_json from 1.0.115 to 1.0.116. ([\#17112](https://github.com/element-hq/synapse/issues/17112)) +* Update the `tornado` Python dependency from 6.2 to 6.4. 
([\#17131](https://github.com/element-hq/synapse/issues/17131)) +* Bump twisted from 23.10.0 to 24.3.0. ([\#17135](https://github.com/element-hq/synapse/issues/17135)) +* Bump types-bleach from 6.1.0.1 to 6.1.0.20240331. ([\#17110](https://github.com/element-hq/synapse/issues/17110)) +* Bump types-pillow from 10.2.0.20240415 to 10.2.0.20240423. ([\#17159](https://github.com/element-hq/synapse/issues/17159)) +* Bump types-setuptools from 69.0.0.20240125 to 69.5.0.20240423. ([\#17134](https://github.com/element-hq/synapse/issues/17134)) + +# Synapse 1.106.0 (2024-04-30) + +No significant changes since 1.106.0rc1. + + + + +# Synapse 1.106.0rc1 (2024-04-25) + +### Features + +- Send an email if the address is already bound to a user account. ([\#16819](https://github.com/element-hq/synapse/issues/16819)) +- Implement the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108). ([\#17056](https://github.com/element-hq/synapse/issues/17056)) +- Support delegating the rendezvous mechanism described by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/issues/4108) to an external implementation. ([\#17086](https://github.com/element-hq/synapse/issues/17086)) + +### Bugfixes + +- Add validation to ensure that the `limit` parameter on `/publicRooms` is non-negative. ([\#16920](https://github.com/element-hq/synapse/issues/16920)) +- Return `400 M_NOT_JSON` upon receiving invalid JSON in query parameters across various client and admin endpoints, rather than an internal server error. ([\#16923](https://github.com/element-hq/synapse/issues/16923)) +- Make the CSAPI endpoint `/keys/device_signing/upload` idempotent. ([\#16943](https://github.com/element-hq/synapse/issues/16943)) +- Redact membership events if the user requested erasure upon deactivation. ([\#17076](https://github.com/element-hq/synapse/issues/17076)) + +### Improved Documentation + +- Add a prompt in the contributing guide to manually configure icu4c. ([\#17069](https://github.com/element-hq/synapse/issues/17069)) +- Clarify what part of message retention is still experimental. ([\#17099](https://github.com/element-hq/synapse/issues/17099)) + +### Internal Changes + +- Use the new receipts column to optimise receipt and push action SQL queries. Contributed by Nick @ Beeper (@fizzadar). ([\#17032](https://github.com/element-hq/synapse/issues/17032), [\#17096](https://github.com/element-hq/synapse/issues/17096)) +- Fix mypy with the latest Twisted release. ([\#17036](https://github.com/element-hq/synapse/issues/17036)) +- Bump the minimum supported Rust version to 1.66.0. ([\#17079](https://github.com/element-hq/synapse/issues/17079)) +- Add helpers to transform Twisted requests to Rust HTTP requests/responses. ([\#17081](https://github.com/element-hq/synapse/issues/17081)) +- Fix the type annotation for `visited_chains` after the `mypy` upgrade. ([\#17125](https://github.com/element-hq/synapse/issues/17125)) + + + +### Updates to locked dependencies + +* Bump anyhow from 1.0.81 to 1.0.82. ([\#17095](https://github.com/element-hq/synapse/issues/17095)) +* Bump peaceiris/actions-gh-pages from 3.9.3 to 4.0.0. ([\#17087](https://github.com/element-hq/synapse/issues/17087)) +* Bump peaceiris/actions-mdbook from 1.2.0 to 2.0.0. ([\#17089](https://github.com/element-hq/synapse/issues/17089)) +* Bump pyasn1-modules from 0.3.0 to 0.4.0. ([\#17093](https://github.com/element-hq/synapse/issues/17093)) +* Bump pygithub from 2.2.0 to 2.3.0. 
([\#17092](https://github.com/element-hq/synapse/issues/17092)) +* Bump ruff from 0.3.5 to 0.3.7. ([\#17094](https://github.com/element-hq/synapse/issues/17094)) +* Bump sigstore/cosign-installer from 3.4.0 to 3.5.0. ([\#17088](https://github.com/element-hq/synapse/issues/17088)) +* Bump twine from 4.0.2 to 5.0.0. ([\#17091](https://github.com/element-hq/synapse/issues/17091)) +* Bump types-pillow from 10.2.0.20240406 to 10.2.0.20240415. ([\#17090](https://github.com/element-hq/synapse/issues/17090)) + +# Synapse 1.105.1 (2024-04-23) + +## Security advisory + +The following issues are fixed in 1.105.1. + +- [GHSA-3h7q-rfh9-xm4v](https://github.com/element-hq/synapse/security/advisories/GHSA-3h7q-rfh9-xm4v) / [CVE-2024-31208](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-31208) — High Severity + + A weakness in auth chain indexing allows DoS from remote room members through disk fill and high CPU usage. + +See the advisories for more details. If you have any questions, email security@element.io. + + + +# Synapse 1.105.0 (2024-04-16) + +No significant changes since 1.105.0rc1. + + + + +# Synapse 1.105.0rc1 (2024-04-11) + +### Features + +- Stabilize support for [MSC4010](https://github.com/matrix-org/matrix-spec-proposals/pull/4010) which clarifies the interaction of push rules and account data. Contributed by @clokep. ([\#17022](https://github.com/element-hq/synapse/issues/17022)) +- Stabilize support for [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981): `/relations` recursion. Contributed by @clokep. ([\#17023](https://github.com/element-hq/synapse/issues/17023)) +- Add support for moving `/pushrules` off of the main process. ([\#17037](https://github.com/element-hq/synapse/issues/17037), [\#17038](https://github.com/element-hq/synapse/issues/17038)) + +### Bugfixes + +- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16930](https://github.com/element-hq/synapse/issues/16930), [\#16932](https://github.com/element-hq/synapse/issues/16932), [\#16942](https://github.com/element-hq/synapse/issues/16942), [\#17064](https://github.com/element-hq/synapse/issues/17064), [\#17065](https://github.com/element-hq/synapse/issues/17065), [\#17066](https://github.com/element-hq/synapse/issues/17066)) +- Fix server notice rooms not always being created as unencrypted rooms, even when `encryption_enabled_by_default_for_room_type` is in use (server notices are always unencrypted). ([\#17033](https://github.com/element-hq/synapse/issues/17033)) +- Fix the `.m.rule.encrypted_room_one_to_one` and `.m.rule.room_one_to_one` default underride push rules being in the wrong order. Contributed by @Sumpy1. ([\#17043](https://github.com/element-hq/synapse/issues/17043)) + +### Internal Changes + +- Refactor auth chain fetching to reduce duplication. ([\#17044](https://github.com/element-hq/synapse/issues/17044)) +- Improve database performance by adding a missing index to `access_tokens.refresh_token_id`. ([\#17045](https://github.com/element-hq/synapse/issues/17045), [\#17054](https://github.com/element-hq/synapse/issues/17054)) +- Improve database performance by reducing the number of receipts fetched when sending push notifications. ([\#17049](https://github.com/element-hq/synapse/issues/17049)) + + + +### Updates to locked dependencies + +* Bump packaging from 23.2 to 24.0. ([\#17027](https://github.com/element-hq/synapse/issues/17027)) +* Bump regex from 1.10.3 to 1.10.4. 
([\#17028](https://github.com/element-hq/synapse/issues/17028)) +* Bump ruff from 0.3.2 to 0.3.5. ([\#17060](https://github.com/element-hq/synapse/issues/17060)) +* Bump serde_json from 1.0.114 to 1.0.115. ([\#17041](https://github.com/element-hq/synapse/issues/17041)) +* Bump types-pillow from 10.2.0.20240125 to 10.2.0.20240406. ([\#17061](https://github.com/element-hq/synapse/issues/17061)) +* Bump types-requests from 2.31.0.20240125 to 2.31.0.20240406. ([\#17063](https://github.com/element-hq/synapse/issues/17063)) +* Bump typing-extensions from 4.9.0 to 4.11.0. ([\#17062](https://github.com/element-hq/synapse/issues/17062)) + +# Synapse 1.104.0 (2024-04-02) + +### Bugfixes + +- Fix a regression when using an OIDC provider. Introduced in v1.104.0rc1. ([\#17031](https://github.com/element-hq/synapse/issues/17031)) + + +# Synapse 1.104.0rc1 (2024-03-26) + +### Features + +- Add an OIDC config to specify extra parameters for the authorization grant URL. It can be useful to pass an ACR value, for example. ([\#16971](https://github.com/element-hq/synapse/issues/16971)) +- Add support for an OIDC provider returning a JWT. ([\#16972](https://github.com/element-hq/synapse/issues/16972), [\#17031](https://github.com/element-hq/synapse/issues/17031)) + +### Bugfixes + +- Fix a bug which meant that, under certain circumstances, we might never retry sending events or to-device messages over federation after a failure. ([\#16925](https://github.com/element-hq/synapse/issues/16925)) +- Fix various long-standing bugs which could cause incorrect state to be returned from `/sync` in certain situations. ([\#16949](https://github.com/element-hq/synapse/issues/16949)) +- Fix a case in which the `m.fully_read` marker would not get updated. Contributed by @SpiritCroc. ([\#16990](https://github.com/element-hq/synapse/issues/16990)) +- Fix a bug where a user's pending knocks at rooms were not retracted when their account was deactivated. Contributed by @hanadi92. ([\#17010](https://github.com/element-hq/synapse/issues/17010)) + +### Updates to the Docker image + +- Update `start.py` to generate config using the correct user ID when running as root (fixes [\#16824](https://github.com/element-hq/synapse/issues/16824), [\#15202](https://github.com/element-hq/synapse/issues/15202)). ([\#16978](https://github.com/element-hq/synapse/issues/16978)) + +### Improved Documentation + +- Add a query to force a refresh of a remote user's device list to the "Useful SQL for Admins" documentation page. ([\#16892](https://github.com/element-hq/synapse/issues/16892)) +- Minor grammatical corrections to the upgrade documentation. ([\#16965](https://github.com/element-hq/synapse/issues/16965)) +- Fix the sort order for the documentation version picker, so that newer releases appear above older ones. ([\#16966](https://github.com/element-hq/synapse/issues/16966)) +- Remove the recommendation for a specific poetry version from the contributing guide. ([\#17002](https://github.com/element-hq/synapse/issues/17002)) + +### Internal Changes + +- Improve lock performance when a lot of locks are all waiting for a single lock to be released. ([\#16840](https://github.com/element-hq/synapse/issues/16840)) +- Update the power level default for public rooms. ([\#16907](https://github.com/element-hq/synapse/issues/16907)) +- Improve event validation. ([\#16908](https://github.com/element-hq/synapse/issues/16908)) +- Multi-worker Docker container: disable log buffering. 
([\#16919](https://github.com/element-hq/synapse/issues/16919)) +- Refactor state delta calculation in the `/sync` handler. ([\#16929](https://github.com/element-hq/synapse/issues/16929)) +- Clarify the docs for some room state functions. ([\#16950](https://github.com/element-hq/synapse/issues/16950)) +- Specify IP subnets in canonical form. ([\#16953](https://github.com/element-hq/synapse/issues/16953)) +- As was done for the SAML mapping provider, pass the module API to the OIDC mapping provider so the mapper can implement more logic in its code. ([\#16974](https://github.com/element-hq/synapse/issues/16974)) +- Allow containers building on top of Synapse's Complement container to use the included PostgreSQL cluster. ([\#16985](https://github.com/element-hq/synapse/issues/16985)) +- Raise the poetry-core version cap to 1.9.0. ([\#16986](https://github.com/element-hq/synapse/issues/16986)) +- Patch the DB connection pool sooner in tests. ([\#17017](https://github.com/element-hq/synapse/issues/17017)) + + + +### Updates to locked dependencies + +* Bump anyhow from 1.0.80 to 1.0.81. ([\#17009](https://github.com/element-hq/synapse/issues/17009)) +* Bump black from 23.10.1 to 24.2.0. ([\#16936](https://github.com/element-hq/synapse/issues/16936)) +* Bump cryptography from 41.0.7 to 42.0.5. ([\#16958](https://github.com/element-hq/synapse/issues/16958)) +* Bump dawidd6/action-download-artifact from 3.1.1 to 3.1.2. ([\#16960](https://github.com/element-hq/synapse/issues/16960)) +* Bump dawidd6/action-download-artifact from 3.1.2 to 3.1.4. ([\#17008](https://github.com/element-hq/synapse/issues/17008)) +* Bump jinja2 from 3.1.2 to 3.1.3. ([\#17005](https://github.com/element-hq/synapse/issues/17005)) +* Bump log from 0.4.20 to 0.4.21. ([\#16977](https://github.com/element-hq/synapse/issues/16977)) +* Bump mypy from 1.5.1 to 1.8.0. ([\#16901](https://github.com/element-hq/synapse/issues/16901)) +* Bump netaddr from 0.9.0 to 1.2.1. ([\#17006](https://github.com/element-hq/synapse/issues/17006)) +* Bump pydantic from 2.6.0 to 2.6.4. ([\#17004](https://github.com/element-hq/synapse/issues/17004)) +* Bump pyo3 from 0.20.2 to 0.20.3. ([\#16962](https://github.com/element-hq/synapse/issues/16962)) +* Bump ruff from 0.1.14 to 0.3.2. ([\#16994](https://github.com/element-hq/synapse/issues/16994)) +* Bump serde from 1.0.196 to 1.0.197. ([\#16963](https://github.com/element-hq/synapse/issues/16963)) +* Bump serde_json from 1.0.113 to 1.0.114. ([\#16961](https://github.com/element-hq/synapse/issues/16961)) +* Bump types-jsonschema from 4.21.0.20240118 to 4.21.0.20240311. ([\#17007](https://github.com/element-hq/synapse/issues/17007)) +* Bump types-psycopg2 from 2.9.21.16 to 2.9.21.20240311. ([\#16995](https://github.com/element-hq/synapse/issues/16995)) +* Bump types-pyopenssl from 23.3.0.0 to 24.0.0.20240311. ([\#17003](https://github.com/element-hq/synapse/issues/17003)) + +# Synapse 1.103.0 (2024-03-19) + +No significant changes since 1.103.0rc1. + + + + +# Synapse 1.103.0rc1 (2024-03-12) + +### Features + +- Add a new [List Accounts v3](https://element-hq.github.io/synapse/v1.103/admin_api/user_admin_api.html#list-accounts-v3) Admin API with improved deactivated user filtering capabilities. ([\#16874](https://github.com/element-hq/synapse/issues/16874)) +- Include the `Retry-After` header by default per [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041). Contributed by @clokep. 
([\#16947](https://github.com/element-hq/synapse/issues/16947)) + +### Bugfixes + +- Fix joining remote rooms when a module uses the `on_new_event` callback. This callback may now pass partial state events instead of the full state for remote rooms. Introduced in v1.76.0. ([\#16973](https://github.com/element-hq/synapse/issues/16973)) +- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. Contributed by @ggogel. ([\#16968](https://github.com/element-hq/synapse/issues/16968)) + +### Improved Documentation + +- Add HAProxy example for single port operation to reverse proxy documentation. Contributed by Georg Pfuetzenreuter (@tacerus). ([\#16768](https://github.com/element-hq/synapse/issues/16768)) +- Improve the documentation around running Complement tests with new configuration parameters. ([\#16946](https://github.com/element-hq/synapse/issues/16946)) +- Add docs on upgrading from a very old version. ([\#16951](https://github.com/element-hq/synapse/issues/16951)) + + +### Updates to locked dependencies + +* Bump JasonEtco/create-an-issue from 2.9.1 to 2.9.2. ([\#16934](https://github.com/element-hq/synapse/issues/16934)) +* Bump anyhow from 1.0.79 to 1.0.80. ([\#16935](https://github.com/element-hq/synapse/issues/16935)) +* Bump dawidd6/action-download-artifact from 3.0.0 to 3.1.1. ([\#16933](https://github.com/element-hq/synapse/issues/16933)) +* Bump furo from 2023.9.10 to 2024.1.29. ([\#16939](https://github.com/element-hq/synapse/issues/16939)) +* Bump pyopenssl from 23.3.0 to 24.0.0. ([\#16937](https://github.com/element-hq/synapse/issues/16937)) +* Bump types-netaddr from 0.10.0.20240106 to 1.2.0.20240219. ([\#16938](https://github.com/element-hq/synapse/issues/16938)) + + +# Synapse 1.102.0 (2024-03-05) + +### Bugfixes + +- Revert https://github.com/element-hq/synapse/pull/16756, which caused incorrect notification counts on mobile clients since v1.100.0. ([\#16979](https://github.com/element-hq/synapse/issues/16979)) + + +# Synapse 1.102.0rc1 (2024-02-20) + +### Features + +- A metric was added for emails sent by Synapse, broken down by type: `synapse_emails_sent_total`. Contributed by Remi Rampin. ([\#16881](https://github.com/element-hq/synapse/issues/16881)) + +### Bugfixes + +- Do not send multiple concurrent requests for keys for the same server. ([\#16894](https://github.com/element-hq/synapse/issues/16894)) +- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. ([\#16903](https://github.com/element-hq/synapse/issues/16903)) +- Always prefer unthreaded receipt when >1 exist ([MSC4102](https://github.com/matrix-org/matrix-spec-proposals/pull/4102)). ([\#16927](https://github.com/element-hq/synapse/issues/16927)) + +### Improved Documentation + +- Fix a small typo in the Rooms section of the Admin API documentation. Contributed by @RainerZufall187. ([\#16857](https://github.com/element-hq/synapse/issues/16857)) + +### Internal Changes + +- Don't invalidate the entire event cache when we purge history. ([\#16905](https://github.com/element-hq/synapse/issues/16905)) +- Add experimental config option to not send device list updates for specific users. ([\#16909](https://github.com/element-hq/synapse/issues/16909)) +- Fix incorrect docker hub link in release script. ([\#16910](https://github.com/element-hq/synapse/issues/16910)) + + + +### Updates to locked dependencies + +* Bump attrs from 23.1.0 to 23.2.0. 
([\#16899](https://github.com/element-hq/synapse/issues/16899)) +* Bump bcrypt from 4.0.1 to 4.1.2. ([\#16900](https://github.com/element-hq/synapse/issues/16900)) +* Bump pygithub from 2.1.1 to 2.2.0. ([\#16902](https://github.com/element-hq/synapse/issues/16902)) +* Bump sentry-sdk from 1.40.0 to 1.40.3. ([\#16898](https://github.com/element-hq/synapse/issues/16898)) + +# Synapse 1.101.0 (2024-02-13) + +### Bugfixes + +- Fix performance regression when fetching auth chains from the DB. Introduced in v1.100.0. ([\#16893](https://github.com/element-hq/synapse/issues/16893)) + + + + +# Synapse 1.101.0rc1 (2024-02-06) + +### Improved Documentation + +- Fix broken links in the documentation. ([\#16853](https://github.com/element-hq/synapse/issues/16853)) +- Update MacOS installation instructions to mention that libicu is optional. ([\#16854](https://github.com/element-hq/synapse/issues/16854)) +- The version picker now correctly lists versions after `v1.98.0`. ([\#16880](https://github.com/element-hq/synapse/issues/16880)) + +### Internal Changes + +- Add support for stabilised [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981) that adds a `recurse` parameter on the `/relations` API. ([\#16842](https://github.com/element-hq/synapse/issues/16842)) + + + +### Updates to locked dependencies + +* Bump dorny/paths-filter from 2 to 3. ([\#16869](https://github.com/element-hq/synapse/issues/16869)) +* Bump gitpython from 3.1.40 to 3.1.41. ([\#16850](https://github.com/element-hq/synapse/issues/16850)) +* Bump hiredis from 2.2.3 to 2.3.2. ([\#16862](https://github.com/element-hq/synapse/issues/16862)) +* Bump jsonschema from 4.20.0 to 4.21.1. ([\#16887](https://github.com/element-hq/synapse/issues/16887)) +* Bump lxml-stubs from 0.4.0 to 0.5.1. ([\#16885](https://github.com/element-hq/synapse/issues/16885)) +* Bump mypy-zope from 1.0.1 to 1.0.3. ([\#16865](https://github.com/element-hq/synapse/issues/16865)) +* Bump phonenumbers from 8.13.26 to 8.13.29. ([\#16868](https://github.com/element-hq/synapse/issues/16868)) +* Bump pydantic from 2.5.3 to 2.6.0. ([\#16888](https://github.com/element-hq/synapse/issues/16888)) +* Bump sentry-sdk from 1.39.1 to 1.40.0. ([\#16889](https://github.com/element-hq/synapse/issues/16889)) +* Bump serde from 1.0.195 to 1.0.196. ([\#16867](https://github.com/element-hq/synapse/issues/16867)) +* Bump serde_json from 1.0.111 to 1.0.113. ([\#16866](https://github.com/element-hq/synapse/issues/16866)) +* Bump sigstore/cosign-installer from 3.3.0 to 3.4.0. ([\#16890](https://github.com/element-hq/synapse/issues/16890)) +* Bump types-pillow from 10.1.0.2 to 10.2.0.20240125. ([\#16864](https://github.com/element-hq/synapse/issues/16864)) +* Bump types-requests from 2.31.0.10 to 2.31.0.20240125. ([\#16886](https://github.com/element-hq/synapse/issues/16886)) +* Bump types-setuptools from 69.0.0.0 to 69.0.0.20240125. ([\#16863](https://github.com/element-hq/synapse/issues/16863)) + +# Synapse 1.100.0 (2024-01-30) + +No significant changes since 1.100.0rc3. + + + + +# Synapse 1.100.0rc3 (2024-01-24) + +### Bugfixes + +- Fix database performance regression due to changing Postgres table statistics. Introduced in v1.100.0rc1. ([\#16849](https://github.com/element-hq/synapse/issues/16849)) + + + + +# Synapse 1.100.0rc2 (2024-01-24) + +This version is the same as 1.100.0rc1 but with fixes to the release process. + +### Internal Changes + +- Downgrade the `download-artifact` and `upload-artifact` actions to v3 due to breaking changes. 
([\#16847](https://github.com/element-hq/synapse/issues/16847)) + + +# Synapse 1.100.0rc1 (2024-01-23) + +*This version was never released to PyPI or the Debian repository due to failures in the automatic part of the release process.* + +### Features + +- Advertise experimental support for [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028) through `/_matrix/client/versions` if enabled. Contributed by @hanadi92. ([\#16787](https://github.com/element-hq/synapse/issues/16787)) + +### Bugfixes + +- Handle wildcard type filters properly for room messages endpoint. Contributed by Mo Balaa. ([\#14984](https://github.com/element-hq/synapse/issues/14984)) + +### Improved Documentation + +- Add a link to the "Request log format" explainer on the "Logging sample config" documentation page. ([\#16778](https://github.com/element-hq/synapse/issues/16778)) +- Fix broken links in issue templates and documentation. ([\#16810](https://github.com/element-hq/synapse/issues/16810)) +- Note the NGINX `listen http2` deprecation in the documentation template for the reverse proxy. ([\#16831](https://github.com/element-hq/synapse/issues/16831)) + +### Internal Changes + +- Faster partial join to room with complex auth graph. ([\#7](https://github.com/element-hq/synapse/issues/7)) +- Improve DB performance of calculating badge counts for push. ([\#16756](https://github.com/element-hq/synapse/issues/16756)) +- Split up deleting devices into batches. ([\#16766](https://github.com/element-hq/synapse/issues/16766)) +- Remove CI check for sign-off as we require a CLA signature instead. ([\#16776](https://github.com/element-hq/synapse/issues/16776)) +- Ensure CI fails when linting fails to make sure auto-merge does the correct thing. ([\#16781](https://github.com/element-hq/synapse/issues/16781)) +- Faster load recents for sync by reducing amount of state pulled out. ([\#16783](https://github.com/element-hq/synapse/issues/16783)) +- Reduce amount of state pulled out when querying federation hierarchy. ([\#16785](https://github.com/element-hq/synapse/issues/16785)) +- Pull less state out of the DB when we retry fetching old events during backfill. ([\#16788](https://github.com/element-hq/synapse/issues/16788)) +- Optimize query for fetching to-device messages in `/sync`. ([\#16805](https://github.com/element-hq/synapse/issues/16805)) +- Reject OIDC config when `client_secret` isn't specified, but the auth method requires one. ([\#16806](https://github.com/element-hq/synapse/issues/16806)) +- Allow room creation but not publishing to continue if room publication rules are violated when creating + a new room. ([\#16811](https://github.com/element-hq/synapse/issues/16811)) +- Bump minimum supported Rust version to 1.65.0. ([\#16818](https://github.com/element-hq/synapse/issues/16818)) +- Fixup copyright lines in file headers after the licensing change. ([\#16820](https://github.com/element-hq/synapse/issues/16820)) +- Add a `--generate-only` option to the internal configuration/launch script for Complement. ([\#16828](https://github.com/element-hq/synapse/issues/16828)) +- Preparatory work for tweaking performance of auth chain lookups. ([\#16833](https://github.com/element-hq/synapse/issues/16833)) +- Speed up e2e device keys queries for bot accounts. ([\#16841](https://github.com/element-hq/synapse/issues/16841)) + +### Updates to locked dependencies + +* Bump actions/cache from 3 to 4. ([\#16832](https://github.com/element-hq/synapse/issues/16832)) +* Bump actions/download-artifact from 3 to 4. 
([\#16795](https://github.com/element-hq/synapse/issues/16795)) +* Bump actions/upload-artifact from 3 to 4. ([\#16796](https://github.com/element-hq/synapse/issues/16796)) +* Bump anyhow from 1.0.75 to 1.0.79. ([\#16789](https://github.com/element-hq/synapse/issues/16789)) +* Bump authlib from 1.2.1 to 1.3.0. ([\#16801](https://github.com/element-hq/synapse/issues/16801)) +* Bump dawidd6/action-download-artifact from 2.28.0 to 3.0.0. ([\#16794](https://github.com/element-hq/synapse/issues/16794)) +* Bump immutabledict from 4.0.0 to 4.1.0. ([\#16812](https://github.com/element-hq/synapse/issues/16812)) +* Bump isort from 5.13.1 to 5.13.2. ([\#16835](https://github.com/element-hq/synapse/issues/16835)) +* Bump lxml from 4.9.3 to 5.1.0. ([\#16813](https://github.com/element-hq/synapse/issues/16813)) +* Bump pillow from 10.1.0 to 10.2.0. ([\#16802](https://github.com/element-hq/synapse/issues/16802)) +* Bump pydantic from 2.5.2 to 2.5.3. ([\#16836](https://github.com/element-hq/synapse/issues/16836)) +* Bump pyo3 from 0.20.0 to 0.20.2. ([\#16791](https://github.com/element-hq/synapse/issues/16791)) +* Bump regex from 1.9.6 to 1.10.3. ([\#16837](https://github.com/element-hq/synapse/issues/16837)) +* Bump ruff from 0.1.13 to 0.1.14. ([\#16838](https://github.com/element-hq/synapse/issues/16838)) +* Bump ruff from 0.1.7 to 0.1.13. ([\#16814](https://github.com/element-hq/synapse/issues/16814)) +* Bump sentry-sdk from 1.35.0 to 1.39.1. ([\#16799](https://github.com/element-hq/synapse/issues/16799)) +* Bump serde_json from 1.0.108 to 1.0.111. ([\#16792](https://github.com/element-hq/synapse/issues/16792)) +* Bump service-identity from 23.1.0 to 24.1.0. ([\#16816](https://github.com/element-hq/synapse/issues/16816)) +* Bump types-commonmark from 0.9.2.4 to 0.9.2.20240106. ([\#16797](https://github.com/element-hq/synapse/issues/16797)) +* Bump types-jsonschema from 4.20.0.0 to 4.20.0.20240105. ([\#16800](https://github.com/element-hq/synapse/issues/16800)) +* Bump types-jsonschema from 4.20.0.20240105 to 4.21.0.20240118. ([\#16834](https://github.com/element-hq/synapse/issues/16834)) +* Bump types-netaddr from 0.9.0.1 to 0.10.0.20240106. ([\#16839](https://github.com/element-hq/synapse/issues/16839)) +* Bump typing-extensions from 4.8.0 to 4.9.0. ([\#16815](https://github.com/element-hq/synapse/issues/16815)) + + +# Synapse 1.99.0 (2024-01-16) + +Synapse 1.99.0 is the first Synapse release under an AGPLv3.0 licence (with CLA to enable Element to sell AGPL +exceptions). You can read more about this here: + + - https://matrix.org/blog/2023/11/06/future-of-synapse-dendrite/ + - https://element.io/blog/element-to-adopt-agplv3/ + - https://element.io/blog/synapse-now-lives-at-github-com-element-hq-synapse/ + +No significant changes since 1.99.0rc1. + + +# Synapse 1.99.0rc1 (2024-01-09) + +### Features + +- Add [config options](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#server_notices) to set the avatar and the topic of the server notices room, as well as the avatar of the server notices user. ([\#16679](https://github.com/matrix-org/synapse/issues/16679)) +- Add config option [`email.notif_delay_before_mail`](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#email) to tweak the delay before an email is sent following a notification. 
([\#16696](https://github.com/matrix-org/synapse/issues/16696)) +- Add new configuration option [`sentry.environment`](https://element-hq.github.io/synapse/v1.99/usage/configuration/config_documentation.html#sentry) for improved system monitoring. Contributed by @zeeshanrafiqrana. ([\#16738](https://github.com/matrix-org/synapse/issues/16738)) +- Filter out rooms from the room directory being served to other homeservers when those rooms block that homeserver by their Access Control Lists. ([\#16759](https://github.com/element-hq/synapse/issues/16759)) + +### Bugfixes + +- Fix a long-standing bug where the signing keys generated by Synapse were world-readable. Contributed by Fabian Klemp. ([\#16740](https://github.com/matrix-org/synapse/issues/16740)) +- Fix email verification redirection. Contributed by Fadhlan Ridhwanallah. ([\#16761](https://github.com/element-hq/synapse/issues/16761)) +- Fixed a bug that prevented users from being queried by display name if it contains non-ASCII characters. ([\#16767](https://github.com/element-hq/synapse/issues/16767)) +- Allow reactivating a user without a password via the Admin API in some edge cases. ([\#16770](https://github.com/element-hq/synapse/issues/16770)) +- Add the `recursion_depth` parameter to the response of the /relations endpoint if MSC3981 recursion is being performed. ([\#16775](https://github.com/element-hq/synapse/issues/16775)) + +### Improved Documentation + +- Added version picker for Synapse documentation. Contributed by @Dmytro27Ind. ([\#16533](https://github.com/matrix-org/synapse/issues/16533)) +- Clarify that `password_config.enabled: "only_for_reauth"` does not allow new logins to be created using password auth. ([\#16737](https://github.com/matrix-org/synapse/issues/16737)) +- Remove value from header in configuration documentation for `refresh_token_lifetime`. ([\#16763](https://github.com/element-hq/synapse/issues/16763)) +- Add another custom statistics collection server to the documentation. Contributed by @loelkes. ([\#16769](https://github.com/element-hq/synapse/issues/16769)) + +### Internal Changes + +- Remove run-once workflow after adding the version picker to the documentation. ([\#9453](https://github.com/element-hq/synapse/issues/9453)) +- Update the implementation of [MSC2965](https://github.com/matrix-org/matrix-spec-proposals/pull/2965) (OIDC Provider discovery). ([\#16726](https://github.com/matrix-org/synapse/issues/16726)) +- Move the rust stubs inline for better IDE integration. ([\#16757](https://github.com/element-hq/synapse/issues/16757)) +- Fix sample config doc CI. ([\#16758](https://github.com/element-hq/synapse/issues/16758)) +- Simplify event internal metadata class. ([\#16762](https://github.com/element-hq/synapse/issues/16762), [\#16780](https://github.com/element-hq/synapse/issues/16780)) +- Sign the published docker image using [cosign](https://docs.sigstore.dev/). ([\#16774](https://github.com/element-hq/synapse/issues/16774)) +- Port `EventInternalMetadata` class to Rust. ([\#16782](https://github.com/element-hq/synapse/issues/16782)) + + + +### Updates to locked dependencies + +* Bump actions/setup-go from 4 to 5. ([\#16749](https://github.com/matrix-org/synapse/issues/16749)) +* Bump actions/setup-python from 4 to 5. ([\#16748](https://github.com/matrix-org/synapse/issues/16748)) +* Bump immutabledict from 3.0.0 to 4.0.0. ([\#16743](https://github.com/matrix-org/synapse/issues/16743)) +* Bump isort from 5.12.0 to 5.13.0. 
([\#16745](https://github.com/matrix-org/synapse/issues/16745)) +* Bump isort from 5.13.0 to 5.13.1. ([\#16752](https://github.com/matrix-org/synapse/issues/16752)) +* Bump pydantic from 2.5.1 to 2.5.2. ([\#16747](https://github.com/matrix-org/synapse/issues/16747)) +* Bump ruff from 0.1.6 to 0.1.7. ([\#16746](https://github.com/matrix-org/synapse/issues/16746)) +* Bump types-setuptools from 68.2.0.2 to 69.0.0.0. ([\#16744](https://github.com/matrix-org/synapse/issues/16744)) diff --git a/docs/code_style.md b/docs/code_style.md
index 026001b8a3..c28aaadad0 100644 --- a/docs/code_style.md +++ b/docs/code_style.md
@@ -8,9 +8,7 @@ errors in code. The necessary tools are: -- [black](https://black.readthedocs.io/en/stable/), a source code formatter; -- [isort](https://pycqa.github.io/isort/), which organises each file's imports; -- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and +- [ruff](https://github.com/charliermarsh/ruff), which can spot common errors and enforce a consistent style; and - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker. See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions diff --git a/docs/development/cas.md b/docs/development/cas.md deleted file mode 100644
index 7c0668e034..0000000000 --- a/docs/development/cas.md +++ /dev/null
@@ -1,64 +0,0 @@ -# How to test CAS as a developer without a server - -The [django-mama-cas](https://github.com/jbittel/django-mama-cas) project is an -easy to run CAS implementation built on top of Django. - -## Prerequisites - -1. Create a new virtualenv: `python3 -m venv <your virtualenv>` -2. Activate your virtualenv: `source /path/to/your/virtualenv/bin/activate` -3. Install Django and django-mama-cas: - ```sh - python -m pip install "django<3" "django-mama-cas==2.4.0" - ``` -4. Create a Django project in the current directory: - ```sh - django-admin startproject cas_test . - ``` -5. Follow the [install directions](https://django-mama-cas.readthedocs.io/en/latest/installation.html#configuring) for django-mama-cas -6. Setup the SQLite database: `python manage.py migrate` -7. Create a user: - ```sh - python manage.py createsuperuser - ``` - 1. Use whatever you want as the username and password. - 2. Leave the other fields blank. -8. Use the built-in Django test server to serve the CAS endpoints on port 8000: - ```sh - python manage.py runserver - ``` - -You should now have a Django project configured to serve CAS authentication with -a single user created. - -## Configure Synapse (and Element) to use CAS - -1. Modify your `homeserver.yaml` to enable CAS and point it to your locally - running Django test server: - ```yaml - cas_config: - enabled: true - server_url: "http://localhost:8000" - service_url: "http://localhost:8081" - #displayname_attribute: name - #required_attributes: - # name: value - ``` -2. Restart Synapse. - -Note that the above configuration assumes the homeserver is running on port 8081 -and that the CAS server is on port 8000, both on localhost. - -## Testing the configuration - -Then in Element: - -1. Visit the login page with a Element pointing at your homeserver. -2. Click the Single Sign-On button. -3. Login using the credentials created with `createsuperuser`. -4. You should be logged in. - -If you want to repeat this process you'll need to manually logout first: - -1. http://localhost:8000/admin/ -2. Click "logout" in the top right. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index f079f61b48..d6efab96cf 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common configuration: ```sh -$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal +$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye ``` (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.) diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md
index 37a06acc12..620d1c16b0 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md
@@ -162,7 +162,7 @@ by a unique name, the current status (stored in JSON), and some dependency infor * Whether the update requires a previous update to be complete. * A rough ordering for which to complete updates. -A new background updates needs to be added to the `background_updates` table: +A new background update needs to be added to the `background_updates` table: ```sql INSERT INTO background_updates (ordering, update_name, depends_on, progress_json) VALUES diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
index e4378231aa..fa5ff4dcf7 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md
@@ -150,6 +150,28 @@ $ poetry shell $ poetry install --extras all ``` +If you want to go even further and remove the Poetry caches: + +```shell +# Find your Poetry cache directory +# Docs: https://github.com/python-poetry/poetry/blob/main/docs/configuration.md#cache-directory +$ poetry config cache-dir + +# Remove packages from all cached repositories +$ poetry cache clear --all . + +# Go completely nuclear and clear out everything Poetry cache related +# including the wheel artifacts which are not covered by the above command +# (see https://github.com/python-poetry/poetry/issues/10304) +# +# This is necessary in order to rebuild or fetch new wheels. For example, if you update +# the `icu` library on your system, you will need to rebuild the PyICU Python package +# in order to incorporate the correct dynamically linked library locations, otherwise you +# will run into errors like: `ImportError: libicui18n.so.75: cannot open shared object file: No such file or directory` +$ rm -rf $(poetry config cache-dir) +``` + + ## ...run a command in the `poetry` virtualenv? Use `poetry run cmd args` when you need the python virtualenv context. @@ -187,7 +209,7 @@ useful. ## ...add a new dependency? Either: -- manually update `pyproject.toml`; then `poetry lock --no-update`; or else +- manually update `pyproject.toml`; then `poetry lock`; or else - `poetry add packagename`. See `poetry add --help`; note the `--dev`, `--extras` and `--optional` flags in particular. @@ -202,12 +224,12 @@ poetry remove packagename ``` ought to do the trick. Alternatively, manually update `pyproject.toml` and -`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock` +`poetry lock`. Include the updated `pyproject.toml` and `poetry.lock` files in your commit. ## ...update the version range for an existing dependency? -Best done by manually editing `pyproject.toml`, then `poetry lock --no-update`. +Best done by manually editing `pyproject.toml`, then `poetry lock`. Include the updated `pyproject.toml` and `poetry.lock` in your commit. ## ...update a dependency in the locked environment? @@ -233,7 +255,7 @@ poetry add packagename==1.2.3 # Get poetry to recompute the content-hash of pyproject.toml without changing # the locked package versions. -poetry lock --no-update +poetry lock ``` Either way, include the updated `poetry.lock` file in your commit. diff --git a/docs/development/saml.md b/docs/development/saml.md deleted file mode 100644
index b08bcb7419..0000000000 --- a/docs/development/saml.md +++ /dev/null
@@ -1,40 +0,0 @@ -# How to test SAML as a developer without a server - -https://fujifish.github.io/samling/samling.html (https://github.com/fujifish/samling) is a great resource for being able to tinker with the -SAML options within Synapse without needing to deploy and configure a complicated software stack. - -To make Synapse (and therefore Element) use it: - -1. Use the samling.html URL above or deploy your own and visit the IdP Metadata tab. -2. Copy the XML to your clipboard. -3. On your Synapse server, create a new file `samling.xml` next to your `homeserver.yaml` with - the XML from step 2 as the contents. -4. Edit your `homeserver.yaml` to include: - ```yaml - saml2_config: - sp_config: - allow_unknown_attributes: true # Works around a bug with AVA Hashes: https://github.com/IdentityPython/pysaml2/issues/388 - metadata: - local: ["samling.xml"] - ``` -5. Ensure that your `homeserver.yaml` has a setting for `public_baseurl`: - ```yaml - public_baseurl: http://localhost:8080/ - ``` -6. Run `apt-get install xmlsec1` and `pip install --upgrade --force 'pysaml2>=4.5.0'` to ensure - the dependencies are installed and ready to go. -7. Restart Synapse. - -Then in Element: - -1. Visit the login page and point Element towards your homeserver using the `public_baseurl` above. -2. Click the Single Sign-On button. -3. On the samling page, enter a Name Identifier and add a SAML Attribute for `uid=your_localpart`. - The response must also be signed. -4. Click "Next". -5. Click "Post Response" (change nothing). -6. You should be logged in. - -If you try and repeat this process, you may be automatically logged in using the information you -gave previously. To fix this, open your developer console (`F12` or `Ctrl+Shift+I`) while on the -samling page and clear the site data. In Chrome, this will be a button on the Application tab. diff --git a/docs/modules/media_repository_callbacks.md b/docs/modules/media_repository_callbacks.md new file mode 100644
index 0000000000..fc37130439 --- /dev/null +++ b/docs/modules/media_repository_callbacks.md
@@ -0,0 +1,66 @@ +# Media repository callbacks + +Media repository callbacks allow module developers to customise the behaviour of the +media repository on a per user basis. Media repository callbacks can be registered +using the module API's `register_media_repository_callbacks` method. + +The available media repository callbacks are: + +### `get_media_config_for_user` + +_First introduced in Synapse v1.132.0_ + +```python +async def get_media_config_for_user(user_id: str) -> Optional[JsonDict] +``` + +**<span style="color:red"> +Caution: This callback is currently experimental. The method signature or behaviour +may change without notice. +</span>** + +Called when processing a request from a client for the +[media config endpoint](https://spec.matrix.org/latest/client-server-api/#get_matrixclientv1mediaconfig). + +The arguments passed to this callback are: + +* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request. + +If the callback returns a dictionary then it will be used as the body of the response to the +client. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `None`, Synapse falls through to the next one. The value of the first +callback that does not return `None` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. + +If no module returns a non-`None` value then the default media config will be returned. + +### `is_user_allowed_to_upload_media_of_size` + +_First introduced in Synapse v1.132.0_ + +```python +async def is_user_allowed_to_upload_media_of_size(user_id: str, size: int) -> bool +``` + +**<span style="color:red"> +Caution: This callback is currently experimental. The method signature or behaviour +may change without notice. +</span>** + +Called before media is accepted for upload from a user, in case the module needs to +enforce a different limit for the particular user. + +The arguments passed to this callback are: + +* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) making the request. +* `size`: The size in bytes of media that is being requested to upload. + +If the module returns `False`, the current request will be denied with the error code +`M_TOO_LARGE` and the HTTP status code 413. + +If multiple modules implement this callback, they will be considered in order. If a callback +returns `True`, Synapse falls through to the next one. The value of the first callback that +returns `False` will be used. If this happens, Synapse will not call any of the subsequent +implementations of this callback.
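As an illustrative sketch only (not part of the upstream page), a module registering both callbacks might look like the following. The registration method name is taken from the documentation above; the keyword argument names are assumed to match the callback names, and the config options (`vip_users`, the upload limits) are invented for the example:

```python
from typing import Optional

from synapse.module_api import JsonDict, ModuleApi


class ExampleMediaModule:
    """A minimal sketch of a module using both media repository callbacks."""

    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Hypothetical per-deployment settings, read from the module config.
        self._vip_users = set(config.get("vip_users", []))
        self._vip_limit = int(config.get("vip_upload_limit", 100 * 1024 * 1024))
        self._default_limit = int(config.get("default_upload_limit", 50 * 1024 * 1024))

        api.register_media_repository_callbacks(
            get_media_config_for_user=self.get_media_config_for_user,
            is_user_allowed_to_upload_media_of_size=self.is_user_allowed_to_upload_media_of_size,
        )

    async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
        # Advertise a larger upload size to VIP users; return None for everyone
        # else so Synapse falls through to the default media config.
        if user_id in self._vip_users:
            return {"m.upload.size": self._vip_limit}
        return None

    async def is_user_allowed_to_upload_media_of_size(self, user_id: str, size: int) -> bool:
        # Enforce the same per-user limit at upload time.
        limit = self._vip_limit if user_id in self._vip_users else self._default_limit
        return size <= limit
```
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md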
index d66ac7df31..6b3105de34 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md
@@ -144,16 +144,6 @@ Here's an example featuring all currently supported keys: "m.login.dummy": True, # Dummy authentication "m.login.terms": True, # User has accepted the terms of service for the homeserver "m.login.recaptcha": True, # User has completed the recaptcha challenge - "m.login.email.identity": { # User has provided and verified an email address - "medium": "email", - "address": "alice@example.com", - "validated_at": 1642701357084, - }, - "m.login.msisdn": { # User has provided and verified a phone number - "medium": "msisdn", - "address": "33123456789", - "validated_at": 1642701357084, - }, "m.login.registration_token": "sometoken", # User has registered through a registration token } ``` @@ -200,26 +190,6 @@ callback that does not return `None` will be used. If this happens, Synapse will any of the subsequent implementations of this callback. If every callback returns `None`, the username will be used (e.g. `alice` if the user being registered is `@alice:example.com`). -## `is_3pid_allowed` - -_First introduced in Synapse v1.53.0_ - -```python -async def is_3pid_allowed(self, medium: str, address: str, registration: bool) -> bool -``` - -Called when attempting to bind a third-party identifier (i.e. an email address or a phone -number). The module is given the medium of the third-party identifier (which is `email` if -the identifier is an email address, or `msisdn` if the identifier is a phone number) and -its address, as well as a boolean indicating whether the attempt to bind is happening as -part of registering a new user. The module must return a boolean indicating whether the -identifier can be allowed to be bound to an account on the local homeserver. - -If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. - ## Example The example module below implements authentication checkers for two different login types: diff --git a/docs/modules/ratelimit_callbacks.md b/docs/modules/ratelimit_callbacks.md new file mode 100644
index 0000000000..30d94024fa --- /dev/null +++ b/docs/modules/ratelimit_callbacks.md
@@ -0,0 +1,43 @@ +# Ratelimit callbacks + +Ratelimit callbacks allow module developers to override ratelimit settings dynamically whilst +Synapse is running. Ratelimit callbacks can be registered using the module API's +`register_ratelimit_callbacks` method. + +The available ratelimit callbacks are: + +### `get_ratelimit_override_for_user` + +_First introduced in Synapse v1.132.0_ + +```python +async def get_ratelimit_override_for_user(user: str, limiter_name: str) -> Optional[synapse.module_api.RatelimitOverride] +``` + +**<span style="color:red"> +Caution: This callback is currently experimental. The method signature or behaviour +may change without notice. +</span>** + +Called when constructing a ratelimiter of a particular type for a user. The module can +return a `per_second` and `burst_count` to be used, or `None` if +the default settings are adequate. The user is represented by their Matrix user ID +(e.g. `@alice:example.com`). The limiter name is usually taken from the `RatelimitSettings` key +value. + +The limiters that are currently supported are: + +- `rc_invites.per_room` +- `rc_invites.per_user` +- `rc_invites.per_issuer` + +The `RatelimitOverride` return type has the following fields: + +- `per_second: float`. The number of actions that can be performed in a second. `0.0` means that ratelimiting is disabled. +- `burst_count: int`. The number of actions that can be performed before being limited. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `None`, Synapse falls through to the next one. The value of the first +callback that does not return `None` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. If no module returns a non-`None` value +then the default settings will be used.
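A rough sketch of a module using this callback follows (again, not part of the upstream page). It assumes `RatelimitOverride` can be constructed from the two documented fields, and the `bot_users` config option is invented for the example:

```python
from typing import Optional

from synapse.module_api import ModuleApi, RatelimitOverride


class ExampleRatelimitModule:
    """A minimal sketch of a module relaxing invite ratelimits for bots."""

    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Hypothetical config option: user IDs allowed a more generous limit.
        self._bots = set(config.get("bot_users", []))

        api.register_ratelimit_callbacks(
            get_ratelimit_override_for_user=self.get_ratelimit_override_for_user,
        )

    async def get_ratelimit_override_for_user(
        self, user: str, limiter_name: str
    ) -> Optional[RatelimitOverride]:
        # Only adjust the per-issuer invite limiter, and only for known bots.
        if limiter_name == "rc_invites.per_issuer" and user in self._bots:
            # Field names are taken from the documentation above.
            return RatelimitOverride(per_second=1.0, burst_count=100)
        # Returning None falls through to the next module, or the defaults.
        return None
```
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md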
index ffdfe6082e..39d7cbc000 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md
@@ -76,8 +76,9 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing an invitation. Both inviter and invitee are -represented by their Matrix user ID (e.g. `@alice:example.com`). +Called when processing an invitation, both when one is created locally or when +receiving an invite over federation. Both inviter and invitee are represented by +their Matrix user ID (e.g. `@alice:example.com`). The callback must return one of: @@ -112,7 +113,9 @@ async def user_may_send_3pid_invite( ``` Called when processing an invitation using a third-party identifier (also called a 3PID, -e.g. an email address or a phone number). +e.g. an email address or a phone number). It is only called when a 3PID invite is created +locally - not when one is received in a room over federation. If the 3PID is already associated +with a Matrix ID, the spam check will go through the `user_may_invite` callback instead. The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the invitee is represented by its medium (e.g. "email") and its address @@ -156,12 +159,19 @@ _First introduced in Synapse v1.37.0_ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.132.0: Added the `room_config` argument. Callbacks that only expect a single `user_id` argument are still supported._ + ```python -async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] +async def user_may_create_room(user_id: str, room_config: synapse.module_api.JsonDict) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` Called when processing a room creation request. +The arguments passed to this callback are: + +* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`). +* `room_config`: The contents of the body of a [/createRoom request](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom) as a dictionary. + The callback must return one of: - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still decide to reject it. @@ -236,13 +246,48 @@ be used. If this happens, Synapse will not call any of the subsequent implementa this callback. +### `user_may_send_state_event` + +_First introduced in Synapse v1.132.0_ + +```python +async def user_may_send_state_event(user_id: str, room_id: str, event_type: str, state_key: str, content: JsonDict) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"] +``` + +**<span style="color:red"> +Caution: This callback is currently experimental. The method signature or behaviour +may change without notice. +</span>** + +Called when processing a request to [send state events](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey) to a room. + +The arguments passed to this callback are: + +* `user_id`: The Matrix user ID of the user (e.g. `@alice:example.com`) sending the state event. +* `room_id`: The ID of the room that the requested state event is being sent to. +* `event_type`: The requested type of event. +* `state_key`: The requested state key. +* `content`: The requested event contents.
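A minimal sketch of a module implementing this callback is shown below (not part of the upstream page; the possible return values are described just after it). The join-rules policy is invented for the example, and `register_spam_checker_callbacks` is the standard registration method for spam checker callbacks:

```python
from synapse.module_api import NOT_SPAM, JsonDict, ModuleApi
from synapse.module_api.errors import Codes


class ExampleStateEventChecker:
    """A minimal sketch of a module vetting state events."""

    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        api.register_spam_checker_callbacks(
            user_may_send_state_event=self.user_may_send_state_event,
        )

    async def user_may_send_state_event(
        self,
        user_id: str,
        room_id: str,
        event_type: str,
        state_key: str,
        content: JsonDict,
    ):
        # Hypothetical policy: only server admins may change a room's join
        # rules through this homeserver. Returns NOT_SPAM or an error code,
        # as described below.
        if event_type == "m.room.join_rules":
            if not await self._api.is_user_admin(user_id):
                return Codes.FORBIDDEN
        return NOT_SPAM
```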
+ +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `check_username_for_spam` _First introduced in Synapse v1.37.0_ ```python -async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool +async def check_username_for_spam(user_profile: synapse.module_api.UserProfile, requester_id: str) -> bool ``` Called when computing search results in the user directory. The module must return a @@ -261,6 +306,8 @@ The profile is represented as a dictionary with the following keys: The module is given a copy of the original dictionary, so modifying it from within the module cannot modify a user's profile when included in user directory search results. +The `requester_id` parameter is the ID of the user that called the user directory API. + If multiple modules implement this callback, they will be considered in order. If a callback returns `False`, Synapse falls through to the next one. The value of the first callback that does not return `False` will be used. If this happens, Synapse will not call @@ -348,6 +395,8 @@ callback returns `False`, Synapse falls through to the next one. The value of th callback that does not return `False` will be used. If this happens, Synapse will not call any of the subsequent implementations of this callback. +Note that this check is applied to federation invites as of Synapse v1.130.0. + ### `check_login_for_spam` diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index b97e28db11..b4162a317d 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md
@@ -86,26 +86,6 @@ room creation will be forbidden as soon as one of the callbacks raises an except this happens, Synapse will not call any of the subsequent implementations of this callback. -### `check_threepid_can_be_invited` - -_First introduced in Synapse v1.39.0_ - -```python -async def check_threepid_can_be_invited( - medium: str, - address: str, - state_events: "synapse.types.StateMap", -) -> bool: -``` - -Called when processing an invite via a third-party identifier (i.e. email or phone number). -The module must return a boolean indicating whether the invite can go through. - -If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. - ### `check_visibility_can_be_modified` _First introduced in Synapse v1.39.0_ @@ -254,67 +234,6 @@ admin API. If multiple modules implement this callback, Synapse runs them all in order. -### `on_threepid_bind` - -_First introduced in Synapse v1.56.0_ - -**<span style="color:red"> -This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which -features the same functionality. The only difference is in name. -</span>** - -```python -async def on_threepid_bind(user_id: str, medium: str, address: str) -> None: -``` - -Called after creating an association between a local user and a third-party identifier -(email address, phone number). The module is given the Matrix ID of the user the -association is for, as well as the medium (`email` or `msisdn`) and address of the -third-party identifier. - -Note that this callback is _not_ called after a successful association on an _identity -server_. - -If multiple modules implement this callback, Synapse runs them all in order. - -### `on_add_user_third_party_identifier` - -_First introduced in Synapse v1.79.0_ - -```python -async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None: -``` - -Called after successfully creating an association between a user and a third-party identifier -(email address, phone number). The module is given the Matrix ID of the user the -association is for, as well as the medium (`email` or `msisdn`) and address of the -third-party identifier (i.e. an email address). - -Note that this callback is _not_ called if a user attempts to bind their third-party identifier -to an identity server (via a call to [`POST -/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)). - -If multiple modules implement this callback, Synapse runs them all in order. - -### `on_remove_user_third_party_identifier` - -_First introduced in Synapse v1.79.0_ - -```python -async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None: -``` - -Called after successfully removing an association between a user and a third-party identifier -(email address, phone number). The module is given the Matrix ID of the user the -association is for, as well as the medium (`email` or `msisdn`) and address of the -third-party identifier (i.e. an email address). 
- -Note that this callback is _not_ called if a user attempts to unbind their third-party -identifier from an identity server (via a call to [`POST -/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)). - -If multiple modules implement this callback, Synapse runs them all in order. - ## Example The example below is a module that implements the third-party rules callback diff --git a/docs/openid.md b/docs/openid.md
index 7a10b1615b..f86ba189c7 100644 --- a/docs/openid.md +++ b/docs/openid.md
@@ -23,6 +23,7 @@ such as [Github][github-idp]. [auth0]: https://auth0.com/ [authentik]: https://goauthentik.io/ [lemonldap]: https://lemonldap-ng.org/ +[pocket-id]: https://pocket-id.org/ [okta]: https://www.okta.com/ [dex-idp]: https://github.com/dexidp/dex [keycloak-idp]: https://www.keycloak.org/docs/latest/server_admin/#sso-protocols @@ -336,6 +337,36 @@ but it has a `response_types_supported` which excludes "code" (which we rely on, is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)), so we have to disable discovery and configure the URIs manually. +### Forgejo + +Forgejo is a fork of Gitea that can act as an OAuth2 provider. + +The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`. + +Synapse config: + +```yaml +oidc_providers: + - idp_id: forgejo + idp_name: Forgejo + discover: false + issuer: "https://your-forgejo.com/" + client_id: "your-client-id" # TO BE FILLED + client_secret: "your-client-secret" # TO BE FILLED + client_auth_method: client_secret_post + scopes: ["openid", "profile", "email", "groups"] + authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize" + token_endpoint: "https://your-forgejo.com/login/oauth/access_token" + userinfo_endpoint: "https://your-forgejo.com/api/v1/user" + user_mapping_provider: + config: + subject_claim: "sub" + picture_claim: "picture" + localpart_template: "{{ user.preferred_username }}" + display_name_template: "{{ user.name }}" + email_template: "{{ user.email }}" +``` + ### GitHub [GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but @@ -594,6 +625,32 @@ oidc_providers: Note that the fields `client_id` and `client_secret` are taken from the CURL response above. +### Pocket ID + +[Pocket ID][pocket-id] is a simple OIDC provider that allows users to authenticate with their passkeys. +1. Go to `OIDC Clients` +2. Click on `Add OIDC Client` +3. Add a name, for example `Synapse` +4. Add `https://auth.example.org/_synapse/client/oidc/callback` to `Callback URLs` # Replace `auth.example.org` with your domain +5. Click on `Save` +6. Note down your `Client ID` and `Client secret`; these will be used later + +Synapse config: + +```yaml +oidc_providers: + - idp_id: pocket_id + idp_name: Pocket ID + issuer: "https://auth.example.org/" # Replace with your domain + client_id: "your-client-id" # Replace with the "Client ID" you noted down before + client_secret: "your-client-secret" # Replace with the "Client secret" you noted down before + scopes: ["openid", "profile"] + user_mapping_provider: + config: + localpart_template: "{{ user.preferred_username }}" + display_name_template: "{{ user.name }}" +``` + ### Shibboleth with OIDC Plugin [Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities. diff --git a/docs/postgres.md b/docs/postgres.md
index d06f0cda10..d51f54c722 100644 --- a/docs/postgres.md +++ b/docs/postgres.md
@@ -100,6 +100,18 @@ database: keepalives_count: 3 ``` +## Postgresql major version upgrades + +Postgres uses separate directories for database locations between major versions (typically `/var/lib/postgresql/<version>/main`). + +Therefore, it is recommended to stop Synapse and other services (MAS, etc) before upgrading Postgres major versions. + +It is also strongly recommended to [back up](./usage/administration/backups.md#database) your database beforehand to ensure no data loss arising from a failed upgrade. + +## Backups + +Don't forget to [back up](./usage/administration/backups.md#database) your database! + ## Tuning Postgres The default settings should be fine for most deployments. For larger diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index 7128af114e..f871a39939 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md
@@ -5,10 +5,10 @@ It is recommended to put a reverse proxy such as [Apache](https://httpd.apache.org/docs/current/mod/mod_proxy_http.html), [Caddy](https://caddyserver.com/docs/quick-starts/reverse-proxy), [HAProxy](https://www.haproxy.org/) or -[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. One advantage -of doing so is that it means that you can expose the default https port -(443) to Matrix clients without needing to run Synapse with root -privileges. +[relayd](https://man.openbsd.org/relayd.8) in front of Synapse. +This has the advantage of being able to expose the default HTTPS port (443) to Matrix +clients without requiring Synapse to bind to a privileged port (port numbers less than +1024), avoiding the need for `CAP_NET_BIND_SERVICE` or running as root. You should configure your reverse proxy to forward requests to `/_matrix` or `/_synapse/client` to Synapse, and have it set the `X-Forwarded-For` and @@ -74,7 +74,7 @@ server { proxy_pass http://localhost:8008; proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Host $host; + proxy_set_header Host $host:$server_port; # Nginx by default only allows file uploads up to 1M in size # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index f538e1498a..22f6950625 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md
@@ -52,8 +52,6 @@ architecture via <https://packages.matrix.org/debian/>. To install the latest release: -TODO UPDATE ALL THIS - ```sh sudo apt install -y lsb-release wget apt-transport-https sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg @@ -159,7 +157,7 @@ sudo pip install py-bcrypt #### Alpine Linux -6543 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with: +Jahway603 maintains [Synapse packages for Alpine Linux](https://pkgs.alpinelinux.org/packages?name=synapse&branch=edge) in the community repository. Install with: ```sh sudo apk add synapse @@ -210,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.8 or later, up to Python 3.11. +- Python 3.9 or later, up to Python 3.13. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org If building on an uncommon architecture for which pre-built wheels are @@ -312,29 +310,18 @@ sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \ sudo dnf group install "Development Tools" ``` -##### Red Hat Enterprise Linux / Rocky Linux +##### Red Hat Enterprise Linux / Rocky Linux / Oracle Linux -*Note: The term "RHEL" below refers to both Red Hat Enterprise Linux and Rocky Linux. The distributions are 1:1 binary compatible.* +*Note: The term "RHEL" below refers to Red Hat Enterprise Linux, Oracle Linux and Rocky Linux. The distributions are 1:1 binary compatible.* -It's recommended to use the latest Python versions. +It's recommended to use the latest Python versions. -RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ship with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them. +RHEL 8 in particular ships with Python 3.6 by default which is EOL and therefore no longer supported by Synapse. RHEL 9 ships with Python 3.9 which is still supported by the Python core team as of this writing. However, newer Python versions provide significant performance improvements and they're available in official distributions' repositories. Therefore it's recommended to use them. Python 3.11 and 3.12 are available for both RHEL 8 and 9. These commands should be run as root user. -RHEL 8 -```bash -# Enable PowerTools repository -dnf config-manager --set-enabled powertools -``` -RHEL 9 -```bash -# Enable CodeReady Linux Builder repository -crb enable -``` - Install new version of Python. You only need one of these: ```bash # Python 3.11 @@ -346,7 +333,7 @@ dnf install python3.12 python3.12-devel ``` Finally, install common prerequisites ```bash -dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf +dnf install libicu libicu-devel libpq5 libpq5-devel lz4 pkgconf dnf group install "Development Tools" ``` ###### Using venv module instead of virtualenv command @@ -355,7 +342,7 @@ It's recommended to use Python venv module directly rather than the virtualenv c * On RHEL 9, virtualenv is only available on [EPEL](https://docs.fedoraproject.org/en-US/epel/). * On RHEL 8, virtualenv is based on Python 3.6. 
It does not support creating 3.11/3.12 virtual environments. -Here's an example of creating Python 3.12 virtual environment and installing Synapse from PyPI. +Here's an example of creating a Python 3.12 virtual environment and installing Synapse from PyPI. ```bash mkdir -p ~/synapse @@ -389,7 +376,7 @@ If you're struggling to get icu discovered, and see: ``` despite it being installed and having your `PATH` updated, you can omit this dependency by not specifying `--extras all` to `poetry`. If using postgres, you can install Synapse via -`poetry install --extras saml2 --extras oidc --extras postgres --extras opentracing --extras redis --extras sentry`. +`poetry install --extras oidc --extras postgres --extras opentracing --extras redis --extras sentry`. ICU is not a hard dependency on getting a working installation. On ARM-based Macs you may also need to install libjpeg and libpq: @@ -658,6 +645,10 @@ This also requires the optional `lxml` python dependency to be installed. This in turn requires the `libxml2` library to be available - on Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for your OS. +### Backups + +Don't forget to take [backups](../usage/administration/backups.md) of your new server! + ### Troubleshooting Installation `pip` seems to leak *lots* of memory during installation. For instance, a Linux diff --git a/docs/spam_checker.md b/docs/spam_checker.md
index 1b6d814937..ead0f03595 100644 --- a/docs/spam_checker.md +++ b/docs/spam_checker.md
@@ -63,7 +63,7 @@ class ExampleSpamChecker: async def user_may_invite(self, inviter_userid, invitee_userid, room_id): return True # allow all invites - async def user_may_create_room(self, userid): + async def user_may_create_room(self, userid, room_config): return True # allow all room creations async def user_may_create_room_alias(self, userid, room_alias): @@ -72,8 +72,8 @@ class ExampleSpamChecker: async def user_may_publish_room(self, userid, room_id): return True # allow publishing of all rooms - async def check_username_for_spam(self, user_profile): - return False # allow all usernames + async def check_username_for_spam(self, user_profile, requester_id): + return False # allow all usernames regardless of requester async def check_registration_for_spam( self, diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md
index d6c4e860ae..e09b733ce7 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md
@@ -10,9 +10,9 @@ As an example, a SSO service may return the email address to turn that into a displayname when creating a Matrix user for this individual. It may choose `John Smith`, or `Smith, John [Example.com]` or any number of variations. As each Synapse configuration may want something different, this is -where SAML mapping providers come into play. +where SSO mapping providers come into play. -SSO mapping providers are currently supported for OpenID and SAML SSO +SSO mapping providers are currently supported for OpenID SSO configurations. Please see the details below for how to implement your own. It is up to the mapping provider whether the user should be assigned a predefined @@ -119,90 +119,3 @@ A custom mapping provider must specify the following methods: Synapse has a built-in OpenID mapping provider if a custom provider isn't specified in the config. It is located at [`synapse.handlers.oidc.JinjaOidcMappingProvider`](https://github.com/element-hq/synapse/blob/develop/synapse/handlers/oidc.py). - -## SAML Mapping Providers - -The SAML mapping provider can be customized by editing the -[`saml2_config.user_mapping_provider.module`](usage/configuration/config_documentation.md#saml2_config) -config option. - -`saml2_config.user_mapping_provider.config` allows you to provide custom -configuration options to the module. Check with the module's documentation for -what options it provides (if any). The options listed by default are for the -user mapping provider built in to Synapse. If using a custom module, you should -comment these options out and use those specified by the module instead. - -### Building a Custom SAML Mapping Provider - -A custom mapping provider must specify the following methods: - -* `def __init__(self, parsed_config, module_api)` - - Arguments: - - `parsed_config` - A configuration object that is the return value of the - `parse_config` method. You should set any configuration options needed by - the module here. - - `module_api` - a `synapse.module_api.ModuleApi` object which provides the - stable API available for extension modules. -* `def parse_config(config)` - - **This method should have the `@staticmethod` decoration.** - - Arguments: - - `config` - A `dict` representing the parsed content of the - `saml_config.user_mapping_provider.config` homeserver config option. - Runs on homeserver startup. Providers should extract and validate - any option values they need here. - - Whatever is returned will be passed back to the user mapping provider module's - `__init__` method during construction. -* `def get_saml_attributes(config)` - - **This method should have the `@staticmethod` decoration.** - - Arguments: - - `config` - A object resulting from a call to `parse_config`. - - Returns a tuple of two sets. The first set equates to the SAML auth - response attributes that are required for the module to function, whereas - the second set consists of those attributes which can be used if available, - but are not necessary. -* `def get_remote_user_id(self, saml_response, client_redirect_url)` - - Arguments: - - `saml_response` - A `saml2.response.AuthnResponse` object to extract user - information from. - - `client_redirect_url` - A string, the URL that the client will be - redirected to. - - This method must return a string, which is the unique, immutable identifier - for the user. Commonly the `uid` claim of the response. 
-* `def saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)` - - Arguments: - - `saml_response` - A `saml2.response.AuthnResponse` object to extract user - information from. - - `failures` - An `int` that represents the amount of times the returned - mxid localpart mapping has failed. This should be used - to create a deduplicated mxid localpart which should be - returned instead. For example, if this method returns - `john.doe` as the value of `mxid_localpart` in the returned - dict, and that is already taken on the homeserver, this - method will be called again with the same parameters but - with failures=1. The method should then return a different - `mxid_localpart` value, such as `john.doe1`. - - `client_redirect_url` - A string, the URL that the client will be - redirected to. - - This method must return a dictionary, which will then be used by Synapse - to build a new user. The following keys are allowed: - * `mxid_localpart` - A string, the mxid localpart of the new user. If this is - `None`, the user is prompted to pick their own username. This is only used - during a user's first login. Once a localpart has been associated with a - remote user ID (see `get_remote_user_id`) it cannot be updated. - * `displayname` - The displayname of the new user. If not provided, will default to - the value of `mxid_localpart`. - * `emails` - A list of emails for the new user. If not provided, will - default to an empty list. - - Alternatively it can raise a `synapse.api.errors.RedirectException` to - redirect the user to another page. This is useful to prompt the user for - additional information, e.g. if you want them to provide their own username. - It is the responsibility of the mapping provider to either redirect back - to `client_redirect_url` (including any additional information) or to - complete registration using methods from the `ModuleApi`. - -### Default SAML Mapping Provider - -Synapse has a built-in SAML mapping provider if a custom provider isn't -specified in the config. It is located at -[`synapse.handlers.saml.DefaultSamlMappingProvider`](https://github.com/element-hq/synapse/blob/develop/synapse/handlers/saml.py). diff --git a/docs/upgrade.md b/docs/upgrade.md
index 52b1adbe90..d508e2231e 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md
@@ -117,6 +117,107 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.130.0 + +## Documented endpoint which can be delegated to a federation worker + +The endpoint `^/_matrix/federation/v1/version$` can be delegated to a federation +worker. This is not new behaviour, but had not been documented yet. The +[list of delegatable endpoints](workers.md#synapseappgeneric_worker) has +been updated to include it. Make sure to check your reverse proxy rules if you +are using workers. + +# Upgrading to v1.126.0 + +## Room list publication rules change + +The default [`room_list_publication_rules`] setting was changed to disallow +anyone (except server admins) from publishing to the room list by default. + +This is in line with Synapse's policy of locking down, by default, features that can +be abused without moderation. + +To keep the previous behavior of allowing publication by default, add the +following to the config: + +```yaml +room_list_publication_rules: + - "action": "allow" +``` + +[`room_list_publication_rules`]: usage/configuration/config_documentation.md#room_list_publication_rules + +## Change of signing key expiry date for the Debian/Ubuntu package repository + +If you use the Debian/Ubuntu packages from `packages.matrix.org`, +please be aware that we have recently updated the expiry date on the repository's GPG signing key, +and that the updated key must be imported into your keyring. + +If you have the `matrix-org-archive-keyring` package installed and it updates before the current key expires, this should +happen automatically. + +Otherwise, if you see an error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you +will need to get a fresh copy of the keys. You can do so with: + +```sh +sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg +``` + +The old version of the key will expire on `2025-03-15`. + +# Upgrading to v1.122.0 + +## Dropping support for PostgreSQL 11 and 12 + +In line with our [deprecation policy](deprecation_policy.md), we've dropped +support for PostgreSQL 11 and 12, as they are no longer supported upstream. +This release of Synapse requires PostgreSQL 13+. + +# Upgrading to v1.120.0 + +## Removal of experimental MSC3886 feature + +[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) +has been closed (and will not enter the Matrix spec). As such, we are +removing the experimental support for it in this release. + +The `experimental_features.msc3886_endpoint` configuration option has +been removed. + +## Authenticated media is now enforced by default + +The [`enable_authenticated_media`] configuration option now defaults to true. + +This means that clients and remote (federated) homeservers now need to use +the authenticated media endpoints in order to download media from your +homeserver. + +As an exception, existing media that was stored on the server prior to +this option changing to `true` will still be accessible over the +unauthenticated endpoints. + +The matrix.org homeserver has already been running with this option enabled +since September 2024, so most common clients and homeservers should already +be compatible.
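+
+As a quick smoke test of the new behaviour (for example, to confirm that any
+reverse proxy rules pass the authenticated endpoints through), you can try
+fetching a known piece of media over the authenticated download API. This is
+only a sketch: the homeserver URL, `<access-token>` and `<media-id>` below are
+placeholders for your own values.
+
+```sh
+# Expect a 200 response with the file, or 401/403 if the access token
+# is missing or invalid.
+curl -H 'Authorization: Bearer <access-token>' \
+  'https://matrix.example.com/_matrix/client/v1/media/download/example.com/<media-id>'
+```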
+ +With that said, administrators who wish to disable this feature for broader +compatibility can still do so by manually configuring +`enable_authenticated_media: False`. + +[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media + + +# Upgrading to v1.119.0 + +## Minimum supported Python version + +The minimum supported Python version has been increased from v3.8 to v3.9. +You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024). + +If you use current versions of the Matrix.org-distributed Docker images, no action is required. +Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8. + + # Upgrading to v1.111.0 ## New worker endpoints for authenticated client and federation media diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md
index a1184d0375..20f8c6a157 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md
@@ -160,7 +160,7 @@ Using the following curl command: ```console curl -H 'Authorization: Bearer <access-token>' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/<room-alias> ``` -`<access-token>` - can be obtained in riot by looking in the riot settings, down the bottom is: +`<access-token>` - can be obtained in Element by opening All settings, clicking Help & About; at the bottom is: Access Token:\<click to reveal\> `<room-alias>` - the room alias, eg. #my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org @@ -255,6 +255,8 @@ line to `/etc/default/matrix-synapse`: LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2 +*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process. + This made a significant difference on Python 2.7 - it's unclear how much of an improvement it provides on Python 3.x. diff --git a/docs/usage/administration/backups.md b/docs/usage/administration/backups.md new file mode 100644
index 0000000000..24d250179b --- /dev/null +++ b/docs/usage/administration/backups.md
@@ -0,0 +1,125 @@ +# How to back up a Synapse homeserver + +It is critical to maintain good backups of your server, to guard against +hardware failure as well as potential corruption due to bugs or administrator +error. + +This page documents the things you will need to consider backing up as part of +a Synapse installation. + +## Configuration files + +Keep a copy of your configuration file (`homeserver.yaml`), as well as any +auxiliary config files it refers to such as the +[`log_config`](../configuration/config_documentation.md#log_config) file and +[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files). +Often, all such config files will be kept in a single directory such as +`/etc/synapse`, which will make this easier. + +## Server signing key + +Your server has a [signing +key](../configuration/config_documentation.md#signing_key_path) which it uses +to sign events and outgoing federation requests. It is easiest to back it up +with your configuration files, but an alternative is to have Synapse create a +new signing key if you have to restore. + +If you do decide to replace the signing key, you should add the old *public* +key to +[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys). + +## Database + +Synapse's support for SQLite is only suitable for testing purposes, so for the +purposes of this document, we'll assume you are using +[PostgreSQL](../../postgres.md). + +A full discussion of backup strategies for PostgreSQL is out of scope for this +document; see the [PostgreSQL +documentation](https://www.postgresql.org/docs/current/backup.html) for +detailed information. + +### Synapse-specific details + + * Be very careful not to restore into a database that already has tables + present. At best, this will error; at worst, it will lead to subtle database + inconsistencies. + + * The `e2e_one_time_keys_json` table should **not** be backed up, or if it is + backed up, should be + [`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html) + after restoring the database before Synapse is started. + + [Background: restoring the database to an older backup can cause + used one-time-keys to be re-issued, causing subsequent [message decryption + errors](https://github.com/element-hq/element-meta/issues/2155). Clearing + all one-time-keys from the database ensures that this cannot happen, and + will prompt clients to generate and upload new one-time-keys.] + +### Quick and easy database backup and restore + +Typically, the easiest solution is to use `pg_dump` to take a copy of the whole +database. We recommend `pg_dump`'s custom dump format, as it produces +significantly smaller backup files. + +```shell +sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump +``` + +There is no need to stop Postgres or Synapse while `pg_dump` is running: it +will take a consistent snapshot of the database. + +To restore, you will need to recreate the database as described in [Using +Postgres](../../postgres.md#set-up-database), +then load the dump into it with `pg_restore`: + +```shell +sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse +sudo -u postgres pg_restore -d synapse < synapse.dump +``` + +(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember +to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before +starting Synapse; a sketch of this follows below.) + +To reiterate: do **not** restore a dump over an existing database.
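+
+For example, the post-restore cleanup mentioned above might look like the
+following sketch, assuming the freshly restored database is named `synapse` as
+in the examples above:
+
+```shell
+# Clear any restored one-time keys so that stale keys are never re-issued,
+# then confirm that the table is empty before starting Synapse.
+sudo -u postgres psql synapse -c 'TRUNCATE e2e_one_time_keys_json;'
+sudo -u postgres psql synapse -c 'SELECT count(*) FROM e2e_one_time_keys_json;'
+```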
+ +Again, if you plan to run your homeserver at any sort of production level, we +recommend studying the PostgreSQL documentation on backup options. + +## Media store + +Synapse keeps a copy of media uploaded by users, including avatars and message +attachments, in its [Media +store](../configuration/config_documentation.md#media-store). + +It is a directory on the local disk, containing the following directories: + + * `local_content`: this is content uploaded by your local users. As a general + rule, you should back this up: it may represent the only copy of those + media files anywhere in the federation, and if they are lost, users will + see errors when viewing user or room avatars, and messages with attachments. + + * `local_thumbnails`: "thumbnails" of images uploaded by your users. If + [`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails) + is enabled, these will be regenerated if they are removed from the disk, and + there is therefore no need to back them up. + + If `dynamic_thumbnails` is *not* enabled (the default): although this can + theoretically be regenerated from `local_content`, there is no tooling to do + so. We recommend that these are backed up too. + + * `remote_content`: this is a cache of content that was uploaded by a user on + another server, and has since been requested by a user on your own server. + + Typically there is no need to back up this directory: if a file in this directory + is removed, Synapse will attempt to fetch it again from the remote + server. + + * `remote_thumbnails`: thumbnails of images uploaded by users on other + servers. As with `remote_content`, there is normally no need to back this + up. + + * `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded + by the [URL previews](../../setup/installation.md#url-previews) feature. + These do not need to be backed up. diff --git a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
index 4c0dbb5acd..a8a717e2a2 100644 --- a/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md +++ b/docs/usage/administration/monitoring/reporting_homeserver_usage_statistics.md
@@ -30,7 +30,7 @@ The following statistics are sent to the configured reporting endpoint: | `python_version` | string | The Python version number in use (e.g "3.7.1"). Taken from `sys.version_info`. | | `total_users` | int | The number of registered users on the homeserver. | | `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. | -| `daily_user_type_native` | int | The number of native users created in the last 24 hours. | +| `daily_user_type_native` | int | The number of native, non-guest users created in the last 24 hours. | | `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. | | `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. | | `total_room_count` | int | The total number of rooms present on the homeserver. | @@ -50,8 +50,8 @@ The following statistics are sent to the configured reporting endpoint: | `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. | | `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. | | `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. | -| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.0, and "3.38.5" for SQLite 3.38.5 installed on the system. | -| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. | +| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.10, and "3.38.5" for SQLite 3.38.5 installed on the system. | +| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. | [^1]: Native matrix users and guests are always counted. If the diff --git a/docs/usage/administration/monthly_active_users.md b/docs/usage/administration/monthly_active_users.md
index b1da6f17c2..2c9123259e 100644 --- a/docs/usage/administration/monthly_active_users.md +++ b/docs/usage/administration/monthly_active_users.md
@@ -79,6 +79,3 @@ Synapse records several different prometheus metrics for MAU. `synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`. - -`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have -registered accounts on the homeserver. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 567bbf88d2..4a4a99dcc0 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md
@@ -1,3 +1,5 @@ +<!-- Document auto-generated by scripts-dev/gen_config_documentation.py --> + # Configuring Synapse This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified @@ -90,32 +92,32 @@ apply if you want your config file to be read properly. A few helpful things to the sub-options, if any, are identified and listed in the body of the section. In addition, each setting has an example of its usage, with the proper indentation shown. - +--- ## Modules Server admins can expand Synapse's functionality with external modules. -See [here](../../modules/index.md) for more -documentation on how to configure or create custom modules for Synapse. - +See [here](../../modules/index.md) for more documentation on how to configure or create custom modules for Synapse. --- ### `modules` -Use the `module` sub-option to add modules under this option to extend functionality. -The `module` setting then has a sub-option, `config`, which can be used to define some configuration -for the `module`. +*(array)* Use the `module` sub-option to add modules under this option to extend functionality. The `module` setting then has a sub-option, `config`, which can be used to define some configuration for the `module`. Defaults to `[]`. + +Options for each entry include: + +* `module` (string): Path to the Python class of the module. -Defaults to none. +* `config` (object): Configuration options for the module. Example configuration: ```yaml modules: - - module: my_super_module.MySuperClass - config: - do_thing: true - - module: my_other_super_module.SomeClass - config: {} +- module: my_super_module.MySuperClass + config: + do_thing: true +- module: my_other_super_module.SomeClass + config: {} ``` --- ## Server @@ -125,46 +127,83 @@ Define your homeserver name and other base options. --- ### `server_name` -This sets the public-facing domain of the server. +*(string)* This sets the public-facing domain of the server. -The `server_name` name will appear at the end of usernames and room addresses -created on your server. For example if the `server_name` was example.com, -usernames on your server would be in the format `@user:example.com` +The `server_name` name will appear at the end of usernames and room addresses created on your server. For example if the `server_name` was example.com, usernames on your server would be in the format `@user:example.com`. -In most cases you should avoid using a matrix specific subdomain such as -matrix.example.com or synapse.example.com as the `server_name` for the same -reasons you wouldn't use user@email.example.com as your email address. -See [here](../../delegate.md) -for information on how to host Synapse on a subdomain while preserving -a clean `server_name`. +In most cases you should avoid using a matrix specific subdomain such as matrix.example.com or synapse.example.com as the `server_name` for the same reasons you wouldn't use user@email.example.com as your email address. See [here](../../delegate.md) for information on how to host Synapse on a subdomain while preserving a clean `server_name`. -The `server_name` cannot be changed later so it is important to -configure this correctly before you start Synapse. It should be all -lowercase and may contain an explicit port. +The `server_name` cannot be changed later so it is important to configure this correctly before you start Synapse. It should be all lowercase and may contain an explicit port. There is no default for this option. 
-Example configuration #1: +Example configurations: ```yaml server_name: matrix.org ``` -Example configuration #2: + ```yaml server_name: localhost:8080 ``` --- ### `pid_file` -When running Synapse as a daemon, the file to store the pid in. Defaults to none. +*(string|null)* When running Synapse as a daemon, the file to store the pid in. Defaults to `null`. Example configuration: ```yaml pid_file: DATADIR/homeserver.pid ``` --- +### `daemonize` + +*(boolean)* Specifies whether Synapse should be started as a daemon process. If Synapse is being managed by [systemd](../../systemd-with-workers/), this option must be omitted or set to `false`. + +This can also be set by the `--daemonize` (`-D`) argument when starting Synapse. + +See `worker_daemonize` for more information on daemonizing workers. + +Defaults to `false`. + +Example configuration: +```yaml +daemonize: true +``` +--- +### `print_pidfile` + +*(boolean)* Print the path to the pidfile just before daemonizing. + +This can also be set by the `--print-pidfile` argument when starting Synapse. + +Defaults to `false`. + +Example configuration: +```yaml +print_pidfile: true +``` +--- +### `user_agent_suffix` + +*(string|null)* A suffix that is appended to the Synapse user-agent (ex. `Synapse/v1.123.0`). Defaults to `null`. + +Example configuration: +```yaml +user_agent_suffix: ' (I''m a teapot; Linux x86_64)' +``` +--- +### `use_frozen_dicts` + +*(boolean)* Determines whether we should freeze the internal dict object in `FrozenEvent`. Freezing prevents bugs where we accidentally share e.g. signature dicts. However, freezing a dict is expensive. Defaults to `false`. + +Example configuration: +```yaml +use_frozen_dicts: true +``` +--- ### `web_client_location` -The absolute URL to the web client which `/` will redirect to. Defaults to none. +*(string|null)* The absolute URL to the web client which `/` will redirect to. Defaults to `null`. Example configuration: ```yaml @@ -173,14 +212,11 @@ web_client_location: https://riot.example.com/ --- ### `public_baseurl` -The public-facing base URL that clients use to access this Homeserver (not -including _matrix/...). This is the same URL a user might enter into the -'Custom Homeserver URL' field on their client. If you use Synapse with a -reverse proxy, this should be the URL to reach Synapse via the proxy. -Otherwise, it should be the URL to reach Synapse's client HTTP listener (see -['listeners'](#listeners) below). +*(string|null)* The public-facing base URL that clients use to access this Homeserver (not including _matrix/...). This is the same URL a user might enter into the "Custom Homeserver URL" field on their client. If you use Synapse with a reverse proxy, this should be the URL to reach Synapse via the proxy. Otherwise, it should be the URL to reach Synapse's client HTTP listener (see [`listeners`](#listeners) below). -Defaults to `https://<server_name>/`. +If unset or null, `https://<server_name>/` is used. + +Defaults to `null`. Example configuration: ```yaml @@ -189,46 +225,39 @@ public_baseurl: https://example.com/ --- ### `serve_server_wellknown` -By default, other servers will try to reach our server on port 8448, which can -be inconvenient in some environments. +*(boolean)* By default, other servers will try to reach our server on port 8448, which can be inconvenient in some environments. -Provided `https://<server_name>/` on port 443 is routed to Synapse, this -option configures Synapse to serve a file at `https://<server_name>/.well-known/matrix/server`. 
-This will tell other servers to send traffic to port 443 instead. +Provided `https://<server_name>/` on port 443 is routed to Synapse, this option configures Synapse to serve a file at `https://<server_name>/.well-known/matrix/server`. This will tell other servers to send traffic to port 443 instead. This option currently defaults to false. -See [Delegation of incoming federation traffic](../../delegate.md) for more -information. +See [Delegation of incoming federation traffic](../../delegate.md) for more information. + +Defaults to `false`. Example configuration: ```yaml serve_server_wellknown: true ``` --- -### `extra_well_known_client_content ` +### `extra_well_known_client_content` -This option allows server runners to add arbitrary key-value pairs to the [client-facing `.well-known` response](https://spec.matrix.org/latest/client-server-api/#well-known-uri). -Note that the `public_baseurl` config option must be provided for Synapse to serve a response to `/.well-known/matrix/client` at all. +*(object)* This option allows server runners to add arbitrary key-value pairs to the [client-facing `.well-known` response](https://spec.matrix.org/latest/client-server-api/#well-known-uri). Note that the `public_baseurl` config option must be provided for Synapse to serve a response to `/.well-known/matrix/client` at all. -If this option is provided, it parses the given yaml to json and -serves it on `/.well-known/matrix/client` endpoint -alongside the standard properties. +If this option is provided, it parses the given YAML to JSON and serves it on the `/.well-known/matrix/client` endpoint alongside the standard properties. *Added in Synapse 1.62.0.* Example configuration: ```yaml -extra_well_known_client_content : +extra_well_known_client_content: option1: value1 option2: value2 ``` --- ### `soft_file_limit` -Set the soft limit on the number of file descriptors synapse can use. -Zero is used to indicate synapse should set the soft limit to the hard limit. -Defaults to 0. +*(integer)* Set the soft limit on the number of file descriptors synapse can use. Zero is used to indicate synapse should set the soft limit to the hard limit. Defaults to `0`. Example configuration: ```yaml @@ -237,10 +266,19 @@ soft_file_limit: 3 ``` --- ### `presence` -Presence tracking allows users to see the state (e.g online/offline) -of other local and remote users. Set the `enabled` sub-option to false to -disable presence tracking on this homeserver. Defaults to true. -This option replaces the previous top-level 'use_presence' option. +*(object)* Presence tracking allows users to see the state (e.g. online/offline) of other local and remote users. This option replaces the previous top-level `use_presence` option. + +This setting has the following sub-options: + +* `enabled` (boolean|string): Set to false to disable presence tracking on this homeserver. + + Can also be set to a special value of "untracked" which ignores updates received via clients and federation, while still accepting updates from the [module API](../../modules/index.md) (see the sketch after this list). + + *The "untracked" option was added in Synapse 1.96.0.* + + Defaults to `true`. + +* `include_offline_users_on_sync` (boolean): When clients perform an initial or `full_state` sync, presence results for offline users are not included by default. Setting `include_offline_users_on_sync` to `true` will always include offline users in the results. Defaults to `false`.
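+
+For instance, the special "untracked" value described above would be set as
+follows (a sketch; this stops Synapse from tracking presence sent by clients
+and federation, while module-driven updates keep working):
+
+```yaml
+presence:
+  # Ignore presence updates from clients and federation, but still
+  # accept updates made via the module API.
+  enabled: untracked
+```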
Example configuration: ```yaml @@ -248,23 +286,10 @@ presence: enabled: false include_offline_users_on_sync: false ``` - -`enabled` can also be set to a special value of "untracked" which ignores updates -received via clients and federation, while still accepting updates from the -[module API](../../modules/index.md). - -*The "untracked" option was added in Synapse 1.96.0.* - -When clients perform an initial or `full_state` sync, presence results for offline users are -not included by default. Setting `include_offline_users_on_sync` to `true` will always include -offline users in the results. Defaults to false. - --- ### `require_auth_for_profile_requests` -Whether to require authentication to retrieve profile data (avatars, display names) of other -users through the client API. Defaults to false. Note that profile data is also available -via the federation API, unless `allow_profile_lookup_over_federation` is set to false. +*(boolean)* Whether to require authentication to retrieve profile data (avatars, display names) of other users through the client API. Note that profile data is also available via the federation API, unless `allow_profile_lookup_over_federation` is set to false. Defaults to `false`. Example configuration: ```yaml @@ -273,10 +298,7 @@ require_auth_for_profile_requests: true --- ### `limit_profile_requests_to_users_who_share_rooms` -Use this option to require a user to share a room with another user in order -to retrieve their profile information. Only checked on Client-Server -requests. Profile requests from other servers should be checked by the -requesting server. Defaults to false. +*(boolean)* Use this option to require a user to share a room with another user in order to retrieve their profile information. Only checked on Client-Server requests. Profile requests from other servers should be checked by the requesting server. Defaults to `false`. Example configuration: ```yaml @@ -285,11 +307,7 @@ limit_profile_requests_to_users_who_share_rooms: true --- ### `include_profile_data_on_invite` -Use this option to prevent a user's profile data from being retrieved and -displayed in a room until they have joined it. By default, a user's -profile data is included in an invite event, regardless of the values -of the above two settings, and whether or not the users share a server. -Defaults to true. +*(boolean)* Use this option to prevent a user's profile data from being retrieved and displayed in a room until they have joined it. By default, a user's profile data is included in an invite event, regardless of the values of the above two settings, and whether or not the users share a server. Defaults to `true`. Example configuration: ```yaml @@ -298,9 +316,7 @@ include_profile_data_on_invite: false --- ### `allow_public_rooms_without_auth` -If set to true, removes the need for authentication to access the server's -public rooms directory through the client API, meaning that anyone can -query the room directory. Defaults to false. +*(boolean)* If set to true, removes the need for authentication to access the server's public rooms directory through the client API, meaning that anyone can query the room directory. Defaults to `false`. Example configuration: ```yaml @@ -309,8 +325,7 @@ allow_public_rooms_without_auth: true --- ### `allow_public_rooms_over_federation` -If set to true, allows any other homeserver to fetch the server's public -rooms directory via federation. Defaults to false. 
+*(boolean)* If set to true, allows any other homeserver to fetch the server's public rooms directory via federation. Defaults to `false`. Example configuration: ```yaml @@ -319,50 +334,56 @@ allow_public_rooms_over_federation: true ``` --- ### `default_room_version` -The default room version for newly created rooms on this server. +*(string)* The default room version for newly created rooms on this server. Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions) -For example, for room version 1, `default_room_version` should be set -to "1". - -Currently defaults to ["10"](https://spec.matrix.org/v1.5/rooms/v10/). +For example, for room version 1, `default_room_version` should be set to "1". _Changed in Synapse 1.76:_ the default room version was increased from [9](https://spec.matrix.org/v1.5/rooms/v9/) to [10](https://spec.matrix.org/v1.5/rooms/v10/). +Defaults to `"10"`. + Example configuration: ```yaml -default_room_version: "8" +default_room_version: '8' ``` --- ### `gc_thresholds` -The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined. -Defaults to none. +*(array|null)* The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined. Defaults to `null`. Example configuration: ```yaml -gc_thresholds: [700, 10, 10] +gc_thresholds: +- 700 +- 10 +- 10 ``` --- ### `gc_min_interval` -The minimum time in seconds between each GC for a generation, regardless of -the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` -indicates that a second must pass between consecutive generation 0 GCs, etc. +*(array)* The minimum time in seconds between each GC for a generation, regardless of the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive generation 0 GCs, etc. -Defaults to `[1s, 10s, 30s]`. +Default configuration: ```yaml +gc_min_interval: +- 1s +- 10s +- 30s +``` Example configuration: ```yaml -gc_min_interval: [0.5s, 30s, 1m] +gc_min_interval: +- 0.5s +- 30s +- 1m ``` --- ### `filter_timeline_limit` -Set the limit on the returned events in the timeline in the get -and sync operations. Defaults to 100. A value of -1 means no upper limit. - +*(integer)* Set the limit on the returned events in the timeline in the get and sync operations. A value of -1 means no upper limit. Defaults to `100`. Example configuration: ```yaml @@ -371,8 +392,7 @@ filter_timeline_limit: 5000 ``` --- ### `block_non_admin_invites` -Whether room invites to users on this server should be blocked -(except those sent by local server admins). Defaults to false. +*(boolean)* Whether room invites to users on this server should be blocked (except those sent by local server admins). Defaults to `false`. Example configuration: ```yaml @@ -381,8 +401,7 @@ block_non_admin_invites: true ``` --- ### `enable_search` -If set to false, new messages will not be indexed for searching and users -will receive errors when searching for messages. Defaults to true. +*(boolean)* If set to false, new messages will not be indexed for searching and users will receive errors when searching for messages. Defaults to `true`. Example configuration: ```yaml @@ -391,219 +410,196 @@ enable_search: false ``` --- ### `ip_range_blacklist` -This option prevents outgoing requests from being sent to the specified blacklisted IP address -CIDR ranges.
If this option is not specified then it defaults to private IP -address ranges (see the example below). +*(array)* This option prevents outgoing requests from being sent to the specified blacklisted IP address CIDR ranges. If this option is not specified then it defaults to private IP address ranges (see the example below). -The blacklist applies to the outbound requests for federation, identity servers, -push servers, and for checking key validity for third-party invite events. +The blacklist applies to the outbound requests for federation, identity servers, push servers, and for checking key validity for third-party invite events. -(0.0.0.0 and :: are always blacklisted, whether or not they are explicitly -listed here, since they correspond to unroutable addresses.) +(0.0.0.0 and :: are always blacklisted, whether or not they are explicitly listed here, since they correspond to unroutable addresses.) This option replaces `federation_ip_range_blacklist` in Synapse v1.25.0. Note: The value is ignored when an HTTP proxy is in use. -Example configuration: +Default configuration: ```yaml ip_range_blacklist: - - '127.0.0.0/8' - - '10.0.0.0/8' - - '172.16.0.0/12' - - '192.168.0.0/16' - - '100.64.0.0/10' - - '192.0.0.0/24' - - '169.254.0.0/16' - - '192.88.99.0/24' - - '198.18.0.0/15' - - '192.0.2.0/24' - - '198.51.100.0/24' - - '203.0.113.0/24' - - '224.0.0.0/4' - - '::1/128' - - 'fe80::/10' - - 'fc00::/7' - - '2001:db8::/32' - - 'ff00::/8' - - 'fec0::/10' +- 127.0.0.0/8 +- 10.0.0.0/8 +- 172.16.0.0/12 +- 192.168.0.0/16 +- 100.64.0.0/10 +- 192.0.0.0/24 +- 169.254.0.0/16 +- 192.88.99.0/24 +- 198.18.0.0/15 +- 192.0.2.0/24 +- 198.51.100.0/24 +- 203.0.113.0/24 +- 224.0.0.0/4 +- ::1/128 +- fe80::/10 +- fc00::/7 +- 2001:db8::/32 +- ff00::/8 +- fec0::/10 ``` --- ### `ip_range_whitelist` -List of IP address CIDR ranges that should be allowed for federation, -identity servers, push servers, and for checking key validity for -third-party invite events. This is useful for specifying exceptions to -wide-ranging blacklisted target IP ranges - e.g. for communication with -a push server only visible in your network. +*(array)* List of IP address CIDR ranges that should be allowed for federation, identity servers, push servers, and for checking key validity for third-party invite events. This is useful for specifying exceptions to wide-ranging blacklisted target IP ranges – e.g. for communication with a push server only visible in your network. -This whitelist overrides `ip_range_blacklist` and defaults to an empty -list. +This whitelist overrides `ip_range_blacklist`. + +Defaults to `[]`. Example configuration: ```yaml ip_range_whitelist: - - '192.168.1.1' +- 192.168.1.1 ``` --- ### `listeners` -List of ports that Synapse should listen on, their purpose and their -configuration. +*(array)* List of ports that Synapse should listen on, their purpose and their configuration. -Sub-options for each listener include: +Valid resource names are: -* `port`: the TCP port to bind to. +* `client`: the client-server API (/_matrix/client). Also implies `media` and `static`. If configuring the main process, the Synapse Admin API (/_synapse/admin) is also implied. -* `tag`: An alias for the port in the logger name. If set the tag is logged instead -of the port. Default to `None`, is optional and only valid for listener with `type: http`. -See the docs [request log format](../administration/request_log.md). +* `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more. 
-* `bind_addresses`: a list of local addresses to listen on. The default is - 'all local interfaces'. +* `federation`: the server-server API (/_matrix/federation). Also implies `media`, `keys`, `openid`. -* `type`: the type of listener. Normally `http`, but other valid options are: +* `keys`: the key discovery API (/_matrix/key). - * `manhole`: (see the docs [here](../../manhole.md)), +* `media`: the media API (/_matrix/media). - * `metrics`: (see the docs [here](../../metrics-howto.md)), +* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets) -* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path. +* `openid`: OpenID authentication. See [here](../../openid.md). -* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is - behind a [reverse-proxy](../../reverse_proxy.md). +* `replication`: the HTTP replication API (/_synapse/replication). See [here](../../workers.md). -* `request_id_header`: The header extracted from each incoming request that is - used as the basis for the request ID. The request ID is used in - [logs](../administration/request_log.md#request-log-format) and tracing to - correlate and match up requests. When unset, Synapse will automatically - generate sequential request IDs. This option is useful when Synapse is behind - a [reverse-proxy](../../reverse_proxy.md). +* `static`: static resources under synapse/static (/_matrix/static). (Mostly useful for "fallback authentication".) - _Added in Synapse 1.68.0._ +* `health`: the [health check endpoint](../../reverse_proxy.md#health-check-endpoint). This endpoint is by default active for all other resources and does not have to be activated separately. This is only useful if you want to use the health endpoint explicitly on a dedicated port or for [workers](../../workers.md) and containers without listener e.g. [application services](../../workers.md#notifying-application-services). -* `resources`: Only valid for an 'http' listener. A list of resources to host - on this port. Sub-options for each resource are: +Defaults to `[]`. - * `names`: a list of names of HTTP resources. See below for a list of valid resource names. +Options for each entry include: - * `compress`: set to true to enable gzip compression on HTTP bodies for this resource. This is currently only supported with the - `client`, `consent`, `metrics` and `federation` resources. +* `port` (integer): The TCP port to bind to. -* `additional_resources`: Only valid for an 'http' listener. A map of - additional endpoints which should be loaded via dynamic modules. +* `tag` (string|null): An alias for the port in the logger name. If set, the tag is logged instead of the port. It is optional, is only valid for listeners with `type: http`, and defaults to `None`. See the docs [request log format](../administration/request_log.md). -Unix socket support (_Added in Synapse 1.89.0_): -* `path`: A path and filename for a Unix socket. Make sure it is located in a - directory with read and write permissions, and that it already exists (the directory - will not be created). Defaults to `None`. - * **Note**: The use of both `path` and `port` options for the same `listener` is not - compatible. - * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted.
- * Other options that would not make sense to use with a UNIX socket, such as - `bind_addresses` and `tls` will be ignored and can be removed. -* `mode`: The file permissions to set on the UNIX socket. Defaults to `666` -* **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`). - Also make sure that `metrics` is not included in `resources` -> `names` +* `bind_addresses` (array|null): A list of local addresses to listen on. The default is "all local interfaces". +* `type` (string): The type of listener. Normally `http`, but other valid options are [`manhole`](../../manhole.md) and [`metrics`](../../metrics-howto.md). -Valid resource names are: +* `tls` (boolean): Set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path/tls_certificate_path. -* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`. +* `x_forwarded` (boolean): Only valid for an `http` listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is behind a [reverse-proxy](../../reverse_proxy.md). -* `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more. +* `request_id_header` (string|null): The header extracted from each incoming request that is used as the basis for the request ID. The request ID is used in [logs](../administration/request_log.md#request-log-format) and tracing to correlate and match up requests. When unset, Synapse will automatically generate sequential request IDs. This option is useful when Synapse is behind a [reverse-proxy](../../reverse_proxy.md). -* `federation`: the server-server API (/_matrix/federation). Also implies `media`, `keys`, `openid` + _Added in Synapse 1.68.0._ -* `keys`: the key discovery API (/_matrix/key). +* `resources` (array): Only valid for an `http` listener. A list of resources to host on this port. -* `media`: the media API (/_matrix/media). + Options for each entry include: -* `metrics`: the metrics interface. See [here](../../metrics-howto.md). (Not compatible with Unix sockets) + * `names` (array): A list of names of HTTP resources. See below for a list of valid resource names. -* `openid`: OpenID authentication. See [here](../../openid.md). + * `compress` (boolean): Set to true to enable gzip compression on HTTP bodies for this resource. This is currently only supported with the `client`, `consent`, `metrics` and `federation` resources. -* `replication`: the HTTP replication API (/_synapse/replication). See [here](../../workers.md). +* `additional_resources` (object): Only valid for an `http` listener. A map of additional endpoints which should be loaded via dynamic modules. -* `static`: static resources under synapse/static (/_matrix/static). (Mostly useful for 'fallback authentication'.) +* `path` (string): A path and filename for a Unix socket. Make sure it is located in a directory with read and write permissions, and that it already exists (the directory will not be created). Defaults to `None`. + * **Note**: The use of both `path` and `port` options for the same `listener` is not compatible. + * The `x_forwarded` option defaults to true when using Unix sockets and can be omitted. + * Other options that would not make sense to use with a UNIX socket, such as `bind_addresses` and `tls`, will be ignored and can be removed. + + _Added in Synapse 1.89.0_: Unix socket support + +* `mode` (integer|null): The file permissions to set on the UNIX socket.
Defaults to `666` if unset or null. + + **Note:** Must be set as `type: http` (does not support `metrics` and `manhole`). Also make sure that `metrics` is not included in `resources` -> `names` -* `health`: the [health check endpoint](../../reverse_proxy.md#health-check-endpoint). This endpoint - is by default active for all other resources and does not have to be activated separately. - This is only useful if you want to use the health endpoint explicitly on a dedicated port or - for [workers](../../workers.md) and containers without listener e.g. - [application services](../../workers.md#notifying-application-services). + _Added in Synapse 1.89.0_: Unix socket support -Example configuration #1: +Example configurations: ```yaml listeners: - # TLS-enabled listener: for when matrix traffic is sent directly to synapse. - # - # (Note that you will also need to give Synapse a TLS key and certificate: see the TLS section - # below.) - # - - port: 8448 - type: http - tls: true - resources: - - names: [client, federation] +- port: 8448 + type: http + tls: true + resources: + - names: + - client + - federation ``` -Example configuration #2: + ```yaml listeners: - # Insecure HTTP listener: for when matrix traffic passes through a reverse proxy - # that unwraps TLS. - # - # If you plan to use a reverse proxy, please see - # https://element-hq.github.io/synapse/latest/reverse_proxy.html. - # - - port: 8008 - tls: false - type: http - x_forwarded: true - bind_addresses: ['::1', '127.0.0.1'] - - resources: - - names: [client, federation] - compress: false - - # example additional_resources: - additional_resources: - "/_matrix/my/custom/endpoint": - module: my_module.CustomRequestHandler - config: {} - - # Turn on the twisted ssh manhole service on localhost on the given - # port. - - port: 9000 - bind_addresses: ['::1', '127.0.0.1'] - type: manhole +- port: 8008 + tls: false + type: http + x_forwarded: true + bind_addresses: + - ::1 + - 127.0.0.1 + resources: + - names: + - client + - federation + compress: false + additional_resources: + /_matrix/my/custom/endpoint: + module: my_module.CustomRequestHandler + config: {} +- port: 9000 + bind_addresses: + - ::1 + - 127.0.0.1 + type: manhole ``` -Example configuration #3: + ```yaml listeners: - # Unix socket listener: Ideal for Synapse deployments behind a reverse proxy, offering - # lightweight interprocess communication without TCP/IP overhead, avoid port - # conflicts, and providing enhanced security through system file permissions. - # - # Note that x_forwarded will default to true, when using a UNIX socket. Please see - # https://element-hq.github.io/synapse/latest/reverse_proxy.html. - # - - path: /run/synapse/main_public.sock - type: http - resources: - - names: [client, federation] +- path: /run/synapse/main_public.sock + type: http + resources: + - names: + - client + - federation ``` +--- +### `manhole` + +*(integer|null)* Turn on the Twisted telnet manhole service on the given port. + +This can also be set by the `--manhole` argument when starting Synapse. + +Defaults to `null`. +Example configuration: +```yaml +manhole: 1234 +``` --- ### `manhole_settings` -Connection settings for the manhole. You can find more information -on the manhole [here](../../manhole.md). Manhole sub-options include: -* `username` : the username for the manhole. This defaults to 'matrix'. -* `password`: The password for the manhole. This defaults to 'rabbithole'. 
-* `ssh_priv_key_path` and `ssh_pub_key_path`: The private and public SSH key pair used to encrypt the manhole traffic. - If these are left unset, then hardcoded and non-secret keys are used, - which could allow traffic to be intercepted if sent over a public network. +*(object)* Connection settings for the manhole. You can find more information on the manhole [here](../../manhole.md). + +This setting has the following sub-options: + +* `username` (string|null): The username for the manhole. This defaults to "matrix". + +* `password` (string|null): The password for the manhole. This defaults to "rabbithole". + +* `ssh_priv_key_path` (string|null): The private SSH key used to encrypt the manhole traffic. If left unset, then hardcoded and non-secret keys are used, which could allow traffic to be intercepted if sent over a public network. + +* `ssh_pub_key_path` (string|null): The public SSH key corresponding to `ssh_priv_key_path`. If left unset, a hardcoded key is used. Example configuration: ```yaml @@ -616,15 +612,11 @@ manhole_settings: --- ### `dummy_events_threshold` -Forward extremities can build up in a room due to networking delays between -homeservers. Once this happens in a large room, calculation of the state of -that room can become quite expensive. To mitigate this, once the number of -forward extremities reaches a given threshold, Synapse will send an -`org.matrix.dummy_event` event, which will reduce the forward extremities -in the room. +*(integer)* Forward extremities can build up in a room due to networking delays between homeservers. Once this happens in a large room, calculation of the state of that room can become quite expensive. To mitigate this, once the number of forward extremities reaches a given threshold, Synapse will send an `org.matrix.dummy_event` event, which will reduce the forward extremities in the room. This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent. - -The default value is 10. + +Defaults to `10`. Example configuration: ```yaml @@ -633,14 +625,13 @@ dummy_events_threshold: 5 ``` --- ### `delete_stale_devices_after` -An optional duration. If set, Synapse will run a daily background task to log out and -delete any device that hasn't been accessed for more than the specified amount of time. +An optional duration. If set, Synapse will run a daily background task to log out and delete any device that hasn't been accessed for more than the specified amount of time. -Defaults to no duration, which means devices are never pruned. +A value of null means devices are never pruned. -**Note:** This task will always run on the main process, regardless of the value of -`run_background_tasks_on`. This is due to workers currently not having the ability to -delete devices. +**Note:** This task will always run on the main process, regardless of the value of `run_background_tasks_on`. This is due to workers currently not having the ability to delete devices. + +Defaults to `null`. Example configuration: ```yaml @@ -649,152 +640,187 @@ delete_stale_devices_after: 1y ``` --- ### `email` -Configuration for sending emails from Synapse. +*(object)* Configuration for sending emails from Synapse. -Server admins can configure custom templates for email content. See -[here](../../templates.md) for more information. +Server admins can configure custom templates for email content. See [here](../../templates.md) for more information.
This setting has the following sub-options: -* `smtp_host`: The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. -* `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 465 if `force_tls` is true, else 25. + +* `smtp_host` (string): The hostname of the outgoing SMTP server to use. Defaults to `"localhost"`. + +* `smtp_port` (string|null): The port on the mail server for outgoing SMTP. If null or unset, 465 is used if `force_tls` is true, else 25. _Changed in Synapse 1.64.0:_ the default port is now aware of `force_tls`. -* `smtp_user` and `smtp_pass`: Username/password for authentication to the SMTP server. By default, no - authentication is attempted. -* `force_tls`: By default, Synapse connects over plain text and then optionally upgrades - to TLS via STARTTLS. If this option is set to true, TLS is used from the start (Implicit TLS), - and the option `require_transport_security` is ignored. - It is recommended to enable this if supported by your mail server. + + Defaults to `null`. + +* `smtp_user` (string|null): Username for authentication to the SMTP server. Defaults to `null`. + +* `smtp_pass` (string|null): Password for authentication to the SMTP server. Defaults to `null`. + +* `force_tls` (boolean): By default, Synapse connects over plain text and then optionally upgrades to TLS via STARTTLS. If this option is set to true, TLS is used from the start (Implicit TLS), and the option `require_transport_security` is ignored. It is recommended to enable this if supported by your mail server. _New in Synapse 1.64.0._ -* `require_transport_security`: Set to true to require TLS transport security for SMTP. - By default, Synapse will connect over plain text, and will then switch to - TLS via STARTTLS *if the SMTP server supports it*. If this option is set, - Synapse will refuse to connect unless the server supports STARTTLS. -* `enable_tls`: By default, if the server supports TLS, it will be used, and the server - must present a certificate that is valid for 'smtp_host'. If this option - is set to false, TLS will not be used. -* `notif_from`: defines the "From" address to use when sending emails. - It must be set if email sending is enabled. The placeholder '%(app)s' will be replaced by the application name, - which is normally set in `app_name`, but may be overridden by the - Matrix client application. Note that the placeholder must be written '%(app)s', including the - trailing 's'. -* `app_name`: `app_name` defines the default value for '%(app)s' in `notif_from` and email - subjects. It defaults to 'Matrix'. -* `enable_notifs`: Set to true to allow users to receive e-mail notifications. If this is not set, - users can configure e-mail notifications but will not receive them. Disabled by default. -* `notif_for_new_users`: Set to false to disable automatic subscription to email - notifications for new users. Enabled by default. -* `notif_delay_before_mail`: The time to wait before emailing about a notification. - This gives the user a chance to view the message via push or an open client. - Defaults to 10 minutes. + + Defaults to `false`. + +* `require_transport_security` (boolean): Set to true to require TLS transport security for SMTP. By default, Synapse will connect over plain text, and will then switch to TLS via STARTTLS *if the SMTP server supports it*. If this option is set, Synapse will refuse to connect unless the server supports STARTTLS. Defaults to `false`. 
+ +* `enable_tls` (boolean): By default, if the server supports TLS, it will be used, and the server must present a certificate that is valid for `tlsname`. If this option is set to false, TLS will not be used. Defaults to `true`. + +* `tlsname` (string): The domain name the SMTP server's TLS certificate must be valid for, defaulting to `smtp_host`. + +* `notif_from` (string|null): Defines the "From" address to use when sending emails. It must be set if email sending is enabled. The placeholder `%(app)s` will be replaced by the application name, which is normally set in `app_name`, but may be overridden by the Matrix client application. Note that the placeholder must be written `%(app)s`, including the trailing 's'. Defaults to `null`. + +* `app_name` (string): Defines the default value for `%(app)s` in `notif_from` and email subjects. Defaults to `"Matrix"`. + +* `enable_notifs` (boolean): Set to true to allow users to receive e-mail notifications. If this is not set, users can configure e-mail notifications but will not receive them. Defaults to `false`. + +* `notif_for_new_users` (boolean): Set to false to disable automatic subscription to email notifications for new users. Defaults to `true`. + +* `notif_delay_before_mail` (duration): The time to wait before emailing about a notification. This gives the user a chance to view the message via push or an open client. _New in Synapse 1.99.0._ -* `client_base_url`: Custom URL for client links within the email notifications. By default - links will be based on "https://matrix.to". (This setting used to be called `riot_base_url`; - the old name is still supported for backwards-compatibility but is now deprecated.) -* `validation_token_lifetime`: Configures the time that a validation email will expire after sending. - Defaults to 1h. -* `invite_client_location`: The web client location to direct users to during an invite. This is passed - to the identity server as the `org.matrix.web_client_location` key. Defaults - to unset, giving no guidance to the identity server. -* `subjects`: Subjects to use when sending emails from Synapse. The placeholder '%(app)s' will - be replaced with the value of the `app_name` setting, or by a value dictated by the Matrix client application. - In addition, each subject can use the following placeholders: '%(person)s', which will be replaced by the displayname - of the user(s) that sent the message(s), e.g. "Alice and Bob", and '%(room)s', which will be replaced by the name of the room the - message(s) have been sent to, e.g. "My super room". In addition, emails related to account administration will - can use the '%(server_name)s' placeholder, which will be replaced by the value of the - `server_name` setting in your Synapse configuration. - - Here is a list of subjects for notification emails that can be set: - * `message_from_person_in_room`: Subject to use to notify about one message from one or more user(s) in a - room which has a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..." - * `message_from_person`: Subject to use to notify about one message from one or more user(s) in a - room which doesn't have a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s..." - * `messages_from_person`: Subject to use to notify about multiple messages from one or more users in - a room which doesn't have a name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s..." 
- * `messages_in_room`: Subject to use to notify about multiple messages in a room which has a - name. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room..." - * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms. - Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room and others..." - * `messages_from_person_and_others`: Subject to use to notify about multiple messages from multiple persons in - multiple rooms. This is similar to the setting above except it's used when - the room in which the notification was triggered has no name. Defaults to - "[%(app)s] You have messages on %(app)s from %(person)s and others..." - * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name. - Defaults to "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..." - * `invite_from_person`: Subject to use to notify about an invite to a room which doesn't have a - name. Defaults to "[%(app)s] %(person)s has invited you to chat on %(app)s..." - * `password_reset`: Subject to use when sending a password reset email. Defaults to "[%(server_name)s] Password reset" - * `email_validation`: Subject to use when sending a verification email to assert an address's - ownership. Defaults to "[%(server_name)s] Validate your email" -Example configuration: + Defaults to `"10m"`. + +* `client_base_url` (string): Custom URL for client links within the email notifications. (This setting used to be called `riot_base_url`; the old name is still supported for backwards-compatibility but is now deprecated.) Defaults to `"https://matrix.to"`. + +* `validation_token_lifetime` (duration): Configures how long a validation email remains valid after it is sent. Defaults to `"1h"`. + +* `invite_client_location` (string|null): The web client location to direct users to during an invite. This is passed to the identity server as the `org.matrix.web_client_location` key. If null or unset, no guidance is given to the identity server. Defaults to `null`. + +* `subjects` (object): Subjects to use when sending emails from Synapse. The placeholder `%(app)s` will be replaced with the value of the `app_name` setting, or by a value dictated by the Matrix client application. In addition, each subject can use the following placeholders: `%(person)s`, which will be replaced by the displayname of the user(s) that sent the message(s), e.g. "Alice and Bob", and `%(room)s`, which will be replaced by the name of the room the message(s) have been sent to, e.g. "My super room". In addition, emails related to account administration can use the `%(server_name)s` placeholder, which will be replaced by the value of the `server_name` setting in your Synapse configuration. + + This setting has the following sub-options: + + * `message_from_person_in_room` (string): Subject to use to notify about one message from one or more user(s) in a room which has a name. Defaults to `"[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..."`. + + * `message_from_person` (string): Subject to use to notify about one message from one or more user(s) in a room which doesn't have a name. Defaults to `"[%(app)s] You have a message on %(app)s from %(person)s..."`. + + * `messages_from_person` (string): Subject to use to notify about multiple messages from one or more users in a room which doesn't have a name. Defaults to `"[%(app)s] You have messages on %(app)s from %(person)s..."`. 
+ + * `messages_in_room` (string): Subject to use to notify about multiple messages in a room which has a name. Defaults to `"[%(app)s] You have messages on %(app)s in the %(room)s room..."`. + + * `messages_in_room_and_others` (string): Subject to use to notify about multiple messages in multiple rooms. Defaults to `"[%(app)s] You have messages on %(app)s in the %(room)s room and others..."`. + + * `messages_from_person_and_others` (string): Subject to use to notify about multiple messages from multiple persons in multiple rooms. This is similar to the setting above except it's used when the room in which the notification was triggered has no name. Defaults to `"[%(app)s] You have messages on %(app)s from %(person)s and others..."`. + + * `invite_from_person_to_room` (string): Subject to use to notify about an invite to a room which has a name. Defaults to `"[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..."`. + + * `invite_from_person` (string): Subject to use to notify about an invite to a room which doesn't have a name. Defaults to `"[%(app)s] %(person)s has invited you to chat on %(app)s..."`. + + * `password_reset` (string): Subject to use when sending a password reset email. Defaults to `"[%(server_name)s] Password reset"`. + + * `email_validation` (string): Subject to use when sending a verification email to assert an address's ownership. Defaults to `"[%(server_name)s] Validate your email"`. + +Example configuration: ```yaml email: smtp_host: mail.server smtp_port: 587 - smtp_user: "exampleusername" - smtp_pass: "examplepassword" + smtp_user: exampleusername + smtp_pass: examplepassword force_tls: true require_transport_security: true enable_tls: false - notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>" + tlsname: mail.server.example.com + notif_from: Your Friendly %(app)s homeserver <noreply@example.com> app_name: my_branded_matrix_server enable_notifs: true notif_for_new_users: false - client_base_url: "http://localhost/riot" + client_base_url: http://localhost/riot validation_token_lifetime: 15m invite_client_location: https://app.element.io subjects: - message_from_person_in_room: "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..." - message_from_person: "[%(app)s] You have a message on %(app)s from %(person)s..." - messages_from_person: "[%(app)s] You have messages on %(app)s from %(person)s..." - messages_in_room: "[%(app)s] You have messages on %(app)s in the %(room)s room..." - messages_in_room_and_others: "[%(app)s] You have messages on %(app)s in the %(room)s room and others..." - messages_from_person_and_others: "[%(app)s] You have messages on %(app)s from %(person)s and others..." - invite_from_person_to_room: "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..." - invite_from_person: "[%(app)s] %(person)s has invited you to chat on %(app)s..." - password_reset: "[%(server_name)s] Password reset" - email_validation: "[%(server_name)s] Validate your email" + message_from_person_in_room: '[%(app)s] You have a message on %(app)s from %(person)s + in the %(room)s room...' + message_from_person: '[%(app)s] You have a message on %(app)s from %(person)s...' + messages_from_person: '[%(app)s] You have messages on %(app)s from %(person)s...' + messages_in_room: '[%(app)s] You have messages on %(app)s in the %(room)s room...' + messages_in_room_and_others: '[%(app)s] You have messages on %(app)s in the %(room)s + room and others...' 
+ messages_from_person_and_others: '[%(app)s] You have messages on %(app)s from + %(person)s and others...' + invite_from_person_to_room: '[%(app)s] %(person)s has invited you to join the + %(room)s room on %(app)s...' + invite_from_person: '[%(app)s] %(person)s has invited you to chat on %(app)s...' + password_reset: '[%(server_name)s] Password reset' + email_validation: '[%(server_name)s] Validate your email' +``` +--- +### `max_event_delay_duration` + +The maximum allowed duration by which sent events can be delayed, as per [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140). Must be a positive value if set. + +If null or unset, sending of delayed events is disallowed. + +Defaults to `null`. + +Example configuration: +```yaml +max_event_delay_duration: 24h ``` +--- +### `user_types` + +*(object)* Configuration settings related to the user types feature. + +This setting has the following sub-options: +* `default_user_type` (string|null): The default user type to use for registering new users when no value has been specified. Defaults to `null`. + +* `extra_user_types` (list): Array of additional user types to allow. These are treated as real users. Defaults to `[]`. + +Example configuration: +```yaml +user_types: + default_user_type: custom + extra_user_types: + - custom + - custom2 +``` +--- ## Homeserver blocking + Useful options for Synapse admins. --- - ### `admin_contact` -How to reach the server admin, used in `ResourceLimitError`. Defaults to none. +*(string|null)* How to reach the server admin, used in `ResourceLimitError`. Defaults to `null`. Example configuration: ```yaml -admin_contact: 'mailto:admin@server.com' +admin_contact: mailto:admin@server.com ``` --- -### `hs_disabled` and `hs_disabled_message` +### `hs_disabled` -Blocks users from connecting to the homeserver and provides a human-readable reason -why the connection was blocked. Defaults to false. +*(boolean)* Blocks users from connecting to the homeserver and provides the human-readable reason given in `hs_disabled_message`. Defaults to `false`. Example configuration: ```yaml hs_disabled: true -hs_disabled_message: 'Reason for why the HS is blocked' +``` +--- +### `hs_disabled_message` + +*(string)* Human-readable reason why the connection was blocked. Defaults to `"Homeserver is currently blocked"`. + +Example configuration: +```yaml +hs_disabled_message: Reason for why the HS is blocked ``` --- ### `limit_usage_by_mau` -This option disables/enables monthly active user blocking. Used in cases where the admin or -server owner wants to limit to the number of monthly active users. When enabled and a limit is -reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. -Defaults to false. If this is enabled, a value for `max_mau_value` must also be set. +*(boolean)* This option disables/enables monthly active user blocking. Used in cases where the admin or server owner wants to limit the number of monthly active users. When enabled and a limit is reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. If this is enabled, a value for `max_mau_value` must also be set. See [Monthly Active Users](../administration/monthly_active_users.md) for details on how to configure MAU. +Defaults to `false`. 
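+
+Since a value for `max_mau_value` must accompany this option when it is enabled, a minimal combined sketch (illustrative values, not a recommendation) might look like:
+
+```yaml
+# Hypothetical MAU cap: start blocking user actions once 50
+# distinct users have been active in the current month.
+limit_usage_by_mau: true
+max_mau_value: 50
+```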
+ Example configuration: ```yaml limit_usage_by_mau: true @@ -802,8 +828,7 @@ limit_usage_by_mau: true --- ### `max_mau_value` -This option sets the hard limit of monthly active users above which the server will start -blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. +*(integer)* This option sets the hard limit of monthly active users above which the server will start blocking user actions if `limit_usage_by_mau` is enabled. Defaults to `0`. Example configuration: ```yaml @@ -812,11 +837,7 @@ max_mau_value: 50 --- ### `mau_trial_days` -The option `mau_trial_days` is a means to add a grace period for active users. It -means that users must be active for the specified number of days before they -can be considered active and guards against the case where lots of users -sign up in a short space of time never to return after their initial -session. Defaults to 0. +*(integer)* The option `mau_trial_days` is a means to add a grace period for active users. It means that users must be active for the specified number of days before they can be considered active and guards against the case where lots of users sign up in a short space of time never to return after their initial session. Defaults to `0`. Example configuration: ```yaml @@ -825,10 +846,7 @@ mau_trial_days: 5 --- ### `mau_appservice_trial_days` -The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different -trial number if the user was registered by an appservice. A value -of 0 means no trial days are applied. Appservices not listed in this dictionary -use the value of `mau_trial_days` instead. +*(object)* The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different trial number if the user was registered by an appservice. A value of 0 means no trial days are applied. Appservices not listed in this dictionary use the value of `mau_trial_days` instead. Defaults to `{}`. Example configuration: ```yaml @@ -839,11 +857,7 @@ mau_appservice_trial_days: --- ### `mau_limit_alerting` -The option `mau_limit_alerting` is a means of limiting client-side alerting -should the mau limit be reached. This is useful for small instances -where the admin has 5 mau seats (say) for 5 specific people and no -interest increasing the mau limit further. Defaults to true, which -means that alerting is enabled. +*(boolean)* Limit client-side alerting should the mau limit be reached. This is useful for small instances where the admin has 5 mau seats (say) for 5 specific people and no interest in increasing the mau limit further. Defaults to `true`. Example configuration: ```yaml @@ -852,33 +866,16 @@ mau_limit_alerting: false --- ### `mau_stats_only` -If enabled, the metrics for the number of monthly active users will -be populated, however no one will be limited based on these numbers. If `limit_usage_by_mau` -is true, this is implied to be true. Defaults to false. +*(boolean)* If enabled, the metrics for the number of monthly active users will be populated, however no one will be limited based on these numbers. If `limit_usage_by_mau` is true, this is implied to be true. Defaults to `false`. Example configuration: ```yaml mau_stats_only: true ``` --- -### `mau_limit_reserved_threepids` - -Sometimes the server admin will want to ensure certain accounts are -never blocked by mau checking. These accounts are specified by this option. -Defaults to none. Add accounts by specifying the `medium` and `address` of the -reserved threepid (3rd party identifier). 
- -Example configuration: -```yaml -mau_limit_reserved_threepids: - - medium: 'email' - address: 'reserved_user@example.com' -``` ---- ### `server_context` -This option is used by phonehome stats to group together related servers. -Defaults to none. +*(string|null)* This option is used by phonehome stats to group together related servers. Defaults to `null`. Example configuration: ```yaml @@ -887,32 +884,30 @@ server_context: context --- ### `limit_remote_rooms` -When this option is enabled, the room "complexity" will be checked before a user -joins a new remote room. If it is above the complexity limit, the server will -disallow joining, or will instantly leave. This is useful for homeservers that are -resource-constrained. Options for this setting include: -* `enabled`: whether this check is enabled. Defaults to false. -* `complexity`: the limit above which rooms cannot be joined. The default is 1.0. -* `complexity_error`: override the error which is returned when the room is too complex with a - custom message. -* `admins_can_join`: allow server admins to join complex rooms. Default is false. +*(object)* When this option is enabled, the room "complexity" will be checked before a user joins a new remote room. If it is above the complexity limit, the server will disallow joining, or will instantly leave. This is useful for homeservers that are resource-constrained. Room complexity is an arbitrary measure based on factors such as the number of users in the room. -Room complexity is an arbitrary measure based on factors such as the number of -users in the room. +This setting has the following sub-options: + +* `enabled` (boolean): Whether this check is enabled. Defaults to `false`. + +* `complexity` (number): The limit above which rooms cannot be joined. Defaults to `1.0`. + +* `complexity_error` (string): Override the error which is returned when the room is too complex with a custom message. Defaults to `"Your homeserver is unable to join rooms this large or complex. Please speak to your server administrator, or upgrade your instance to join this room."`. + +* `admins_can_join` (boolean): Allow server admins to join complex rooms. Defaults to `false`. Example configuration: ```yaml limit_remote_rooms: enabled: true complexity: 0.5 - complexity_error: "I can't let you do that, Dave." + complexity_error: I can't let you do that, Dave. admins_can_join: true ``` --- ### `require_membership_for_aliases` -Whether to require a user to be in the room to add an alias to it. -Defaults to true. +*(boolean)* Whether to require a user to be in the room to add an alias to it. Defaults to `true`. Example configuration: ```yaml @@ -921,9 +916,7 @@ require_membership_for_aliases: false --- ### `allow_per_room_profiles` -Whether to allow per-room membership profiles through the sending of membership -events with profile information that differs from the target's global profile. -Defaults to true. +*(boolean)* Whether to allow per-room membership profiles through the sending of membership events with profile information that differs from the target's global profile. Defaults to `true`. Example configuration: ```yaml @@ -932,11 +925,12 @@ allow_per_room_profiles: false --- ### `max_avatar_size` -The largest permissible file size in bytes for a user avatar. Defaults to no restriction. -Use M for MB and K for KB. +The largest permissible file size in bytes for a user avatar. Defaults to no restriction. Use M for MB and K for KB. 
Note that user avatar changes will not work if this is set without using Synapse's media repository. +Defaults to `null`. + Example configuration: ```yaml max_avatar_size: 10M @@ -944,26 +938,27 @@ max_avatar_size: 10M --- ### `allowed_avatar_mimetypes` -The MIME types allowed for user avatars. Defaults to no restriction. +*(array|null)* The MIME types allowed for user avatars. Defaults to no restriction. -Note that user avatar changes will not work if this is set without -using Synapse's media repository. +Note that user avatar changes will not work if this is set without using Synapse's media repository. + +Defaults to `null`. Example configuration: ```yaml -allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] +allowed_avatar_mimetypes: +- image/png +- image/jpeg +- image/gif ``` --- ### `redaction_retention_period` -How long to keep redacted events in unredacted form in the database. After -this period redacted events get replaced with their redacted form in the DB. +How long to keep redacted events in unredacted form in the database. After this period redacted events get replaced with their redacted form in the DB. -Synapse will check whether the rentention period has concluded for redacted -events every 5 minutes. Thus, even if this option is set to `0`, Synapse may -still take up to 5 minutes to purge redacted events from the database. +Synapse will check whether the retention period has concluded for redacted events every 5 minutes. Thus, even if this option is set to `0`, Synapse may still take up to 5 minutes to purge redacted events from the database. Set to `null` to disable. -Defaults to `7d`. Set to `null` to disable. +Defaults to `"7d"`. Example configuration: ```yaml @@ -972,9 +967,7 @@ redaction_retention_period: 28d --- ### `forgotten_room_retention_period` -How long to keep locally forgotten rooms before purging them from the DB. - -Defaults to `null`, meaning it's disabled. +How long to keep locally forgotten rooms before purging them from the DB. A value of `null` means it's disabled. Defaults to `null`. Example configuration: ```yaml @@ -983,9 +976,7 @@ forgotten_room_retention_period: 28d --- ### `user_ips_max_age` -How long to track users' last seen time and IPs in the database. - -Defaults to `28d`. Set to `null` to disable clearing out of old rows. +How long to track users' last seen time and IPs in the database. Set to `null` to disable clearing out of old rows. Defaults to `"28d"`. Example configuration: ```yaml @@ -994,13 +985,7 @@ user_ips_max_age: 14d --- ### `request_token_inhibit_3pid_errors` -Inhibits the `/requestToken` endpoints from returning an error that might leak -information about whether an e-mail address is in use or not on this -homeserver. Defaults to false. -Note that for some endpoints the error situation is the e-mail already being -used, and for others the error is entering the e-mail being unused. -If this option is enabled, instead of returning an error, these endpoints will -act as if no error happened and return a fake session ID ('sid') to clients. +*(boolean)* Inhibits the `/requestToken` endpoints from returning an error that might leak information about whether an e-mail address is in use or not on this homeserver. Note that for some endpoints the error situation is the e-mail already being used, and for others the error is the e-mail being unused. If this option is enabled, instead of returning an error, these endpoints will act as if no error happened and return a fake session ID (`sid`) to clients. 
Defaults to `false`. Example configuration: ```yaml @@ -1009,36 +994,30 @@ request_token_inhibit_3pid_errors: true --- ### `next_link_domain_whitelist` -A list of domains that the domain portion of `next_link` parameters -must match. +*(array|null)* A list of domains that the domain portion of `next_link` parameters must match. -This parameter is optionally provided by clients while requesting -validation of an email or phone number, and maps to a link that -users will be automatically redirected to after validation -succeeds. Clients can make use this parameter to aid the validation -process. +This parameter is optionally provided by clients while requesting validation of an email or phone number, and maps to a link that users will be automatically redirected to after validation succeeds. Clients can make use of this parameter to aid the validation process. The whitelist is applied whether the homeserver or an identity server is handling validation. -The default value is no whitelist functionality; all domains are -allowed. Setting this value to an empty list will instead disallow -all domains. +The default value is no whitelist functionality; all domains are allowed. Setting this value to an empty list will instead disallow all domains. + +Defaults to `null`. Example configuration: ```yaml -next_link_domain_whitelist: ["matrix.org"] +next_link_domain_whitelist: +- matrix.org ``` --- -### `templates` and `custom_template_directory` +### `templates` + +*(object)* These options define templates to use when generating email or HTML page contents. + +See [here](../../templates.md) for more information about using custom templates. -These options define templates to use when generating email or HTML page contents. -The `custom_template_directory` determines which directory Synapse will try to -find template files in to use to generate email or HTML page contents. -If not set, or a file is not found within the template directory, a default -template from within the Synapse package will be used. +This setting has the following sub-options: -See [here](../../templates.md) for more -information about using custom templates. +* `custom_template_directory` (string|null): Determines which directory Synapse will try to find template files in to use to generate email or HTML page contents. If not set, or a file is not found within the template directory, a default template from within the Synapse package will be used. Defaults to `null`. Example configuration: ```yaml @@ -1048,62 +1027,49 @@ templates: --- ### `retention` -This option and the associated options determine message retention policy at the -server level. +*(object)* This option and the associated options determine message retention policy at the server level. -Room admins and mods can define a retention period for their rooms using the -`m.room.retention` state event, and server admins can cap this period by setting -the `allowed_lifetime_min` and `allowed_lifetime_max` config options. +Room admins and mods can define a retention period for their rooms using the `m.room.retention` state event, and server admins can cap this period by setting the `allowed_lifetime_min` and `allowed_lifetime_max` config options. -If this feature is enabled, Synapse will regularly look for and purge events -which are older than the room's maximum retention period. Synapse will also -filter events received over federation so that events that should have been -purged are ignored and not stored again. 
+If this feature is enabled, Synapse will regularly look for and purge events which are older than the room's maximum retention period. Synapse will also filter events received over federation so that events that should have been purged are ignored and not stored again. -The message retention policies feature is disabled by default. You can read more -about this feature [here](../../message_retention_policies.md). +The message retention policies feature is disabled by default. You can read more about this feature [here](../../message_retention_policies.md). This setting has the following sub-options: -* `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the - 'm.room.retention' state event. This option is further specified by the - `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the - value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. - -* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If - set, and the state of a room contains a `m.room.retention` event in its state - which contains a `min_lifetime` or a `max_lifetime` that's out of these bounds, - Synapse will cap the room's policy to these limits when running purge jobs. - -* `purge_jobs` and the associated `shortest_max_lifetime` and `longest_max_lifetime` sub-options: - Server admins can define the settings of the background jobs purging the - events whose lifetime has expired under the `purge_jobs` section. - - If no configuration is provided for this option, a single job will be set up to delete - expired events in every room daily. - - Each job's configuration defines which range of message lifetimes the job - takes care of. For example, if `shortest_max_lifetime` is '2d' and - `longest_max_lifetime` is '3d', the job will handle purging expired events in - rooms whose state defines a `max_lifetime` that's both higher than 2 days, and - lower than or equal to 3 days. Both the minimum and the maximum value of a - range are optional, e.g. a job with no `shortest_max_lifetime` and a - `longest_max_lifetime` of '3d' will handle every room with a retention policy - whose `max_lifetime` is lower than or equal to three days. - - The rationale for this per-job configuration is that some rooms might have a - retention policy with a low `max_lifetime`, where history needs to be purged - of outdated messages on a more frequent basis than for the rest of the rooms - (e.g. every 12h), but not want that purge to be performed by a job that's - iterating over every room it knows, which could be heavy on the server. - - If any purge job is configured, it is strongly recommended to have at least - a single job with neither `shortest_max_lifetime` nor `longest_max_lifetime` - set, or one job without `shortest_max_lifetime` and one job without - `longest_max_lifetime` set. Otherwise some rooms might be ignored, even if - `allowed_lifetime_min` and `allowed_lifetime_max` are set, because capping a - room's policy to these values is done after the policies are retrieved from - Synapse's database (which is done using the range specified in a purge job's - configuration). + +* `enabled` (boolean): Enforce message retention policies. Defaults to `false`. + +* `default_policy` (object): Default message retention policy. If set, Synapse will apply it to rooms that lack the `m.room.retention` state event. 
+ + This setting has the following sub-options: + + * `min_lifetime`: Minimum message retention time of the default message retention policy. Synapse doesn't take this option into account yet. Defaults to `null`. + + * `max_lifetime`: Maximum message retention time of the default message retention policy. Defaults to `null`. + +* `allowed_lifetime_min`: Retention policy limit. If set, and the state of a room contains a `m.room.retention` event in its state which contains a `min_lifetime` that's beyond this bound, Synapse will cap the room's policy to these limits when running purge jobs. Defaults to `null`. + +* `allowed_lifetime_max`: Retention policy limit. If set, and the state of a room contains a `m.room.retention` event in its state which contains a `max_lifetime` that's beyond this bound, Synapse will cap the room's policy to these limits when running purge jobs. Defaults to `null`. + +* `purge_jobs` (array|null): Server admins can define the settings of the background jobs purging the events whose lifetime has expired under the `purge_jobs` section. + + If no configuration is provided for this option, a single job will be set up to delete expired events in every room daily. + + Each job's configuration defines which range of message lifetimes the job takes care of. For example, if `shortest_max_lifetime` is "2d" and `longest_max_lifetime` is "3d", the job will handle purging expired events in rooms whose state defines a `max_lifetime` that's both higher than 2 days, and lower than or equal to 3 days. Both the minimum and the maximum value of a range are optional, e.g. a job with no `shortest_max_lifetime` and a `longest_max_lifetime` of "3d" will handle every room with a retention policy whose `max_lifetime` is lower than or equal to three days. + + The rationale for this per-job configuration is that some rooms might have a retention policy with a low `max_lifetime`, where history needs to be purged of outdated messages on a more frequent basis than for the rest of the rooms (e.g. every 12h), but not want that purge to be performed by a job that's iterating over every room it knows, which could be heavy on the server. + + If any purge job is configured, it is strongly recommended to have at least a single job with neither `shortest_max_lifetime` nor `longest_max_lifetime` set, or one job without `shortest_max_lifetime` and one job without `longest_max_lifetime` set. Otherwise some rooms might be ignored, even if `allowed_lifetime_min` and `allowed_lifetime_max` are set, because capping a room's policy to these values is done after the policies are retrieved from Synapse's database (which is done using the range specified in a purge job's configuration). + + Defaults to `null`. + + Options for each entry include: + + * `shortest_max_lifetime`: Apply job to rooms that have a `max_lifetime` higher than `shortest_max_lifetime`. A value of `null` never excludes any room. + + * `longest_max_lifetime`: Apply job to rooms that have a `max_lifetime` lower than or equal to `longest_max_lifetime`. A value of `null` never excludes any room. + + * `interval` (duration): How often to run the job. Example configuration: ```yaml @@ -1115,10 +1081,10 @@ retention: allowed_lifetime_min: 1d allowed_lifetime_max: 1y purge_jobs: - - longest_max_lifetime: 3d - interval: 12h - - shortest_max_lifetime: 3d - interval: 1d + - longest_max_lifetime: 3d + interval: 12h + - shortest_max_lifetime: 3d + interval: 1d ``` --- ## TLS @@ -1128,32 +1094,29 @@ Options related to TLS. 
--- ### `tls_certificate_path` -This option specifies a PEM-encoded X509 certificate for TLS. -This certificate, as of Synapse 1.0, will need to be a valid and verifiable -certificate, signed by a recognised Certificate Authority. Defaults to none. +*(string|null)* This option specifies a PEM-encoded X509 certificate for TLS. This certificate, as of Synapse 1.0, will need to be a valid and verifiable certificate, signed by a recognised Certificate Authority. + +Be sure to use a `.pem` file that includes the full certificate chain including any intermediate certificates (for instance, if using certbot, use `fullchain.pem` as your certificate, not `cert.pem`). -Be sure to use a `.pem` file that includes the full certificate chain including -any intermediate certificates (for instance, if using certbot, use -`fullchain.pem` as your certificate, not `cert.pem`). +Defaults to `null`. Example configuration: ```yaml -tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt" +tls_certificate_path: CONFDIR/SERVERNAME.tls.crt ``` --- ### `tls_private_key_path` -PEM-encoded private key for TLS. Defaults to none. +*(string|null)* PEM-encoded private key for TLS. Defaults to `null`. Example configuration: ```yaml -tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" +tls_private_key_path: CONFDIR/SERVERNAME.tls.key ``` --- ### `federation_verify_certificates` -Whether to verify TLS server certificates for outbound federation requests. -Defaults to true. To disable certificate verification, set the option to false. +*(boolean)* Whether to verify TLS server certificates for outbound federation requests. To disable certificate verification, set the option to false. Defaults to `true`. Example configuration: ```yaml @@ -1162,53 +1125,51 @@ federation_verify_certificates: false --- ### `federation_client_minimum_tls_version` -The minimum TLS version that will be used for outbound federation requests. +*(string)* The minimum TLS version that will be used for outbound federation requests. + +Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note that setting this value higher than `"1.2"` will prevent federation to most of the public Matrix network: only configure it to `"1.3"` if you have an entirely private federation setup and you can ensure TLS 1.3 support. -Defaults to `"1"`. Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note -that setting this value higher than `"1.2"` will prevent federation to most -of the public Matrix network: only configure it to `"1.3"` if you have an -entirely private federation setup and you can ensure TLS 1.3 support. +Defaults to `"1"`. Example configuration: ```yaml -federation_client_minimum_tls_version: "1.2" +federation_client_minimum_tls_version: '1.2' ``` --- ### `federation_certificate_verification_whitelist` -Skip federation certificate verification on a given whitelist -of domains. +*(array)* Skip federation certificate verification on a given whitelist of domains. -This setting should only be used in very specific cases, such as -federation over Tor hidden services and similar. For private networks -of homeservers, you likely want to use a private CA instead. +This setting should only be used in very specific cases, such as federation over Tor hidden services and similar. For private networks of homeservers, you likely want to use a private CA instead. Only effective if `federation_verify_certificates` is `true`. +Defaults to `[]`. 
+ Example configuration: ```yaml federation_certificate_verification_whitelist: - - lon.example.com - - "*.domain.com" - - "*.onion" +- lon.example.com +- '*.domain.com' +- '*.onion' ``` --- ### `federation_custom_ca_list` -List of custom certificate authorities for federation traffic. +*(array)* List of custom certificate authorities for federation traffic. + +This setting should only normally be used within a private network of homeservers. -This setting should only normally be used within a private network of -homeservers. +Note that this list will replace those that are provided by your operating environment. Certificates must be in PEM format. -Note that this list will replace those that are provided by your -operating environment. Certificates must be in PEM format. +Defaults to `[]`. Example configuration: ```yaml federation_custom_ca_list: - - myCA1.pem - - myCA2.pem - - myCA3.pem +- myCA1.pem +- myCA2.pem +- myCA3.pem ``` --- ## Federation @@ -1218,31 +1179,25 @@ Options related to federation. --- ### `federation_domain_whitelist` -Restrict federation to the given whitelist of domains. -N.B. we recommend also firewalling your federation listener to limit -inbound federation traffic as early as possible, rather than relying -purely on this application-layer restriction. If not specified, the -default is to whitelist everything. +*(array)* Restrict federation to the given whitelist of domains. N.B. we recommend also firewalling your federation listener to limit inbound federation traffic as early as possible, rather than relying purely on this application-layer restriction. If not specified, the default is to whitelist everything. + +Note: this does not stop a server from joining rooms that servers not on the whitelist are in. As such, this option is really only useful to establish a "private federation", where a group of servers all whitelist each other and have the same whitelist. -Note: this does not stop a server from joining rooms that servers not on the -whitelist are in. As such, this option is really only useful to establish a -"private federation", where a group of servers all whitelist each other and have -the same whitelist. +Defaults to `[]`. Example configuration: ```yaml federation_domain_whitelist: - - lon.example.com - - nyc.example.com - - syd.example.com +- lon.example.com +- nyc.example.com +- syd.example.com ``` --- ### `federation_whitelist_endpoint_enabled` -Enables an endpoint for fetching the federation whitelist config. +*(boolean)* Enables an endpoint for fetching the federation whitelist config. -The request method and path is `GET /_synapse/client/v1/config/federation_whitelist`, and the -response format is: +The request method and path is `GET /_synapse/client/v1/config/federation_whitelist`, and the response format is: ```json { @@ -1257,6 +1212,8 @@ If `whitelist_enabled` is `false` then the server is permitted to federate with The endpoint requires authentication. +Defaults to `false`. + Example configuration: ```yaml federation_whitelist_endpoint_enabled: true @@ -1264,25 +1221,18 @@ federation_whitelist_endpoint_enabled: true --- ### `federation_metrics_domains` -Report prometheus metrics on the age of PDUs being sent to and received from -the given domains. This can be used to give an idea of "delay" on inbound -and outbound federation, though be aware that any delay can be due to problems -at either end or with the intermediate network. - -By default, no domains are monitored in this way. 
+*(array)* Report prometheus metrics on the age of PDUs being sent to and received from the given domains. This can be used to give an idea of "delay" on inbound and outbound federation, though be aware that any delay can be due to problems at either end or with the intermediate network. Defaults to `[]`. Example configuration: ```yaml federation_metrics_domains: - - matrix.org - - example.com +- matrix.org +- example.com ``` --- ### `allow_profile_lookup_over_federation` -Set to false to disable profile lookup over federation. By default, the -Federation API allows other homeservers to obtain profile data of any user -on this homeserver. +*(boolean)* Set to false to disable profile lookup over federation. By default, the Federation API allows other homeservers to obtain profile data of any user on this homeserver. Defaults to `true`. Example configuration: ```yaml @@ -1291,9 +1241,7 @@ allow_profile_lookup_over_federation: false --- ### `allow_device_name_lookup_over_federation` -Set this option to true to allow device display name lookup over federation. By default, the -Federation API prevents other homeservers from obtaining the display names of any user devices -on this homeserver. +*(boolean)* Set this option to true to allow device display name lookup over federation. By default, the Federation API prevents other homeservers from obtaining the display names of any user devices on this homeserver. Defaults to `false`. Example configuration: ```yaml @@ -1302,27 +1250,29 @@ allow_device_name_lookup_over_federation: true --- ### `federation` -The federation section defines some sub-options related to federation. +*(object)* The federation section defines some sub-options related to federation. -The following options are related to configuring timeout and retry logic for one request, -independently of the others. -Short retry algorithm is used when something or someone will wait for the request to have an -answer, while long retry is used for requests that happen in the background, -like sending a federation transaction. +The following options are related to configuring timeout and retry logic for one request, independently of the others. Short retry algorithm is used when something or someone will wait for the request to have an answer, while long retry is used for requests that happen in the background, like sending a federation transaction. -* `client_timeout`: timeout for the federation requests. Default to 60s. -* `max_short_retry_delay`: maximum delay to be used for the short retry algo. Default to 2s. -* `max_long_retry_delay`: maximum delay to be used for the short retry algo. Default to 60s. -* `max_short_retries`: maximum number of retries for the short retry algo. Default to 3 attempts. -* `max_long_retries`: maximum number of retries for the long retry algo. Default to 10 attempts. +`destination_*` options control the retry logic when communicating with a specific homeserver destination. Unlike the previous configuration options, these values apply across all requests for a given destination and the state of the backoff is stored in the database. -The following options control the retry logic when communicating with a specific homeserver destination. -Unlike the previous configuration options, these values apply across all requests -for a given destination and the state of the backoff is stored in the database. +This setting has the following sub-options: + +* `client_timeout` (duration): Timeout for the federation requests. Defaults to `"60s"`. 
+ +* `max_short_retry_delay` (duration): Maximum delay to be used for the short retry algo. Defaults to `"2s"`. + +* `max_long_retry_delay` (duration): Maximum delay to be used for the long retry algo. Defaults to `"60s"`. + +* `max_short_retries` (integer): Maximum number of retries for the short retry algo. Defaults to `3`. + +* `max_long_retries` (integer): Maximum number of retries for the long retry algo. Defaults to `10`. + +* `destination_min_retry_interval` (duration): The initial backoff, after the first request fails. Defaults to `"10m"`. -* `destination_min_retry_interval`: the initial backoff, after the first request fails. Defaults to 10m. -* `destination_retry_multiplier`: how much we multiply the backoff by after each subsequent fail. Defaults to 2. -* `destination_max_retry_interval`: a cap on the backoff. Defaults to a week. +* `destination_retry_multiplier` (integer): How much we multiply the backoff by after each subsequent fail. Defaults to `2`. + +* `destination_max_retry_interval` (duration): A cap on the backoff. Defaults to `"1w"`. Example configuration: ```yaml @@ -1344,93 +1294,69 @@ Options related to caching. --- ### `event_cache_size` -The number of events to cache in memory. Defaults to 10K. Like other caches, -this is affected by `caches.global_factor` (see below). +*(size)* The number of events to cache in memory. Defaults to 10K. Like other caches, this is affected by `caches.global_factor` (see below). For example, the default is 10K and the global_factor default is 0.5. Since 10K * 0.5 is 5K then the event cache size will be 5K. -The cache affected by this configuration is named as "*getEvent*". +The cache affected by this configuration is named as "\*getEvent\*". Note that this option is not part of the `caches` section. +Defaults to `"10K"`. + Example configuration: ```yaml event_cache_size: 15K ``` --- -### `caches` and associated values +### `caches` -A cache 'factor' is a multiplier that can be applied to each of -Synapse's caches in order to increase or decrease the maximum -number of entries that can be stored. +*(object)* A cache "factor" is a multiplier that can be applied to each of Synapse's caches in order to increase or decrease the maximum number of entries that can be stored. -`caches` can be configured through the following sub-options: +This setting has the following sub-options: -* `global_factor`: Controls the global cache factor, which is the default cache factor - for all caches if a specific factor for that cache is not otherwise - set. +* `global_factor` (number): Controls the global cache factor, which is the default cache factor for all caches if a specific factor for that cache is not otherwise set. - This can also be set by the `SYNAPSE_CACHE_FACTOR` environment - variable. Setting by environment variable takes priority over - setting through the config file. + This can also be set by the `SYNAPSE_CACHE_FACTOR` environment variable. Setting by environment variable takes priority over setting through the config file. Defaults to 0.5, which will halve the size of all caches. Note that changing this value also affects the HTTP connection pool. -* `per_cache_factors`: A dictionary of cache name to cache factor for that individual - cache. Overrides the global cache factor for a given cache. + Defaults to `0.5`. + +* `per_cache_factors` (object): A dictionary of cache name to cache factor for that individual cache. Overrides the global cache factor for a given cache. 
- These can also be set through environment variables comprised - of `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital - letters and underscores. Setting by environment variable - takes priority over setting through the config file. - Ex. `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0` + These can also be set through environment variables comprised of `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital letters and underscores. Setting by environment variable takes priority over setting through the config file. Ex. `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0` - Some caches have '*' and other characters that are not - alphanumeric or underscores. These caches can be named with or - without the special characters stripped. For example, to specify - the cache factor for `*stateGroupCache*` via an environment - variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. + Some caches have '*' and other characters that are not alphanumeric or underscores. These caches can be named with or without the special characters stripped. For example, to specify the cache factor for `*stateGroupCache*` via an environment variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. -* `expire_caches`: Controls whether cache entries are evicted after a specified time - period. Defaults to true. Set to false to disable this feature. Note that never expiring - caches may result in excessive memory usage. + Defaults to `{}`. -* `cache_entry_ttl`: If `expire_caches` is enabled, this flag controls how long an entry can - be in a cache without having been accessed before being evicted. - Defaults to 30m. +* `expire_caches` (boolean): Controls whether cache entries are evicted after a specified time period. Set to false to disable this feature. Note that never expiring caches may result in excessive memory usage. Defaults to `true`. -* `sync_response_cache_duration`: Controls how long the results of a /sync request are - cached for after a successful response is returned. A higher duration can help clients - with intermittent connections, at the cost of higher memory usage. - A value of zero means that sync responses are not cached. - Defaults to 2m. +* `cache_entry_ttl` (duration): If `expire_caches` is enabled, this flag controls how long an entry can be in a cache without having been accessed before being evicted. Defaults to `"30m"`. + +* `sync_response_cache_duration` (duration): Controls how long the results of a /sync request are cached for after a successful response is returned. A higher duration can help clients with intermittent connections, at the cost of higher memory usage. A value of zero means that sync responses are not cached. *Changed in Synapse 1.62.0*: The default was changed from 0 to 2m. -* `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and - `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory - usage and cache entry availability. You must be using [jemalloc](../administration/admin_faq.md#help-synapse-is-slow-and-eats-all-my-ramcpu) - to utilize this option, and all three of the options must be specified for this feature to work. This option - defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work - and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided. 
- Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry - durations. - * `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted. - They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in - the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option. - * `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value - for this option. - * `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when - caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches - from being emptied while Synapse is evicting due to memory. There is no default value for this option. + Defaults to `"2m"`. + +* `cache_autotuning` (object): `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory usage and cache entry availability. You must be using [jemalloc](../administration/admin_faq.md#help-synapse-is-slow-and-eats-all-my-ramcpu) to utilize this option, and all three of the options must be specified for this feature to work. This option defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided. Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry durations. + + This setting has the following sub-options: + + * `max_cache_memory_usage`: Sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted. They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in the setting below, or until the `min_cache_ttl` is hit. Defaults to `null`. + + * `target_cache_memory_usage`: Sets a rough target for the desired memory usage of the caches. Defaults to `null`. + + * `min_cache_ttl`: Sets a limit under which newer cache entries are not evicted and is only applied when caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches from being emptied while Synapse is evicting due to memory. Defaults to `null`. Example configuration: ```yaml -event_cache_size: 15K caches: global_factor: 1.0 per_cache_factors: @@ -1444,54 +1370,42 @@ caches: ### Reloading cache factors -The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) may be reloaded at any time by sending a -[`SIGHUP`](https://en.wikipedia.org/wiki/SIGHUP) signal to Synapse using e.g. +The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) may be reloaded at any time by sending a [`SIGHUP`](https://en.wikipedia.org/wiki/SIGHUP) signal to Synapse using e.g. ```commandline kill -HUP [PID_OF_SYNAPSE_PROCESS] ``` -If you are running multiple workers, you must individually update the worker -config file and send this signal to each worker process. +If you are running multiple workers, you must individually update the worker config file and send this signal to each worker process. 
-If you're using the [example systemd service](https://github.com/element-hq/synapse/blob/develop/contrib/systemd/matrix-synapse.service) -file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using -`systemctl reload matrix-synapse`. +If you're using the [example systemd service](https://github.com/element-hq/synapse/blob/develop/contrib/systemd/matrix-synapse.service) file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using `systemctl reload matrix-synapse`. --- ## Database + Config options related to database settings. --- ### `database` -The `database` setting defines the database that synapse uses to store all of -its data. +*(object)* The `database` setting defines the database that synapse uses to store all of its data. -Associated sub-options: +For more information on using Synapse with Postgres, see [here](../../postgres.md). -* `name`: this option specifies the database engine to use: either `sqlite3` (for SQLite) - or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. +This setting has the following sub-options: -* `txn_limit` gives the maximum number of transactions to run per connection - before reconnecting. Defaults to 0, which means no limit. +* `name` (string): This option specifies the database engine to use: either `sqlite3` (for SQLite) or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. Defaults to `"sqlite3"`. -* `allow_unsafe_locale` is an option specific to Postgres. Under the default behavior, Synapse will refuse to - start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended) - by setting `allow_unsafe_locale` to true. Note that doing so may corrupt your database. You can find more information - [here](../../postgres.md#fixing-incorrect-collate-or-ctype) and [here](https://wiki.postgresql.org/wiki/Locale_data_changes). +* `txn_limit` (integer): Gives the maximum number of transactions to run per connection before reconnecting. 0 means no limit. Defaults to `0`. -* `args` gives options which are passed through to the database engine, - except for options starting with `cp_`, which are used to configure the Twisted - connection pool. For a reference to valid arguments, see: - * for [sqlite](https://docs.python.org/3/library/sqlite3.html#sqlite3.connect) - * for [postgres](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS) - * for [the connection pool](https://docs.twistedmatrix.com/en/stable/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__) +* `allow_unsafe_locale` (boolean): This option is specific to Postgres. Under the default behavior, Synapse will refuse to start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended) by setting `allow_unsafe_locale` to true. Note that doing so may corrupt your database. You can find more information [here](../../postgres.md#fixing-incorrect-collate-or-ctype) and [here](https://wiki.postgresql.org/wiki/Locale_data_changes). Defaults to `false`. -For more information on using Synapse with Postgres, -see [here](../../postgres.md). +* `args` (object): Gives options which are passed through to the database engine, except for options starting with `cp_`, which are used to configure the Twisted connection pool. 
For a reference to valid arguments, see: + * for [sqlite](https://docs.python.org/3/library/sqlite3.html#sqlite3.connect) + * for [postgres](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS) + * for [the connection pool](https://docs.twistedmatrix.com/en/stable/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__) -Example SQLite configuration: +Example configurations: ```yaml database: name: sqlite3 @@ -1499,7 +1413,6 @@ database: database: /path/to/homeserver.db ``` -Example Postgres configuration: ```yaml database: name: psycopg2 @@ -1516,19 +1429,11 @@ database: --- ### `databases` -The `databases` option allows specifying a mapping between certain database tables and -database host details, spreading the load of a single Synapse instance across multiple -database backends. This is often referred to as "database sharding". This option is only -supported for PostgreSQL database backends. +*(object)* The `databases` option allows specifying a mapping between certain database tables and database host details, spreading the load of a single Synapse instance across multiple database backends. This is often referred to as "database sharding". This option is only supported for PostgreSQL database backends. -**Important note:** This is a supported option, but is not currently used in production by the -Matrix.org Foundation. Proceed with caution and always make backups. +**Important note:** This is a supported option, but is not currently used in production by the Matrix.org Foundation. Proceed with caution and always make backups. -`databases` is a dictionary of arbitrarily-named database entries. Each entry is equivalent -to the value of the `database` homeserver config option (see above), with the addition of -a `data_stores` key. `data_stores` is an array of strings that specifies the data store(s) -(a defined label for a set of tables) that should be stored on the associated database -backend entry. +`databases` is a dictionary of arbitrarily-named database entries. Each entry is equivalent to the value of the `database` homeserver config option (see above), with the addition of a `data_stores` key. `data_stores` is an array of strings that specifies the data store(s) (a defined label for a set of tables) that should be stored on the associated database backend entry. The currently defined values for `data_stores` are: @@ -1544,30 +1449,22 @@ The currently defined values for `data_stores` are: * `"main"`: All other database tables and sequences. -All databases will end up with additional tables used for tracking database schema migrations -and any pending background updates. Synapse will create these automatically on startup when checking for -and/or performing database schema migrations. +All databases will end up with additional tables used for tracking database schema migrations and any pending background updates. Synapse will create these automatically on startup when checking for and/or performing database schema migrations. -To migrate an existing database configuration (e.g. all tables on a single database) to a different -configuration (e.g. the "main" data store on one database, and "state" on another), do the following: +To migrate an existing database configuration (e.g. all tables on a single database) to a different configuration (e.g. the "main" data store on one database, and "state" on another), do the following: 1. Take a backup of your existing database. Things can and do go wrong and database corruption is no joke! -2. 
Ensure all pending database migrations have been applied and background updates have run. The simplest - way to do this is to use the `update_synapse_database` script supplied with your Synapse installation. +2. Ensure all pending database migrations have been applied and background updates have run. The simplest way to do this is to use the `update_synapse_database` script supplied with your Synapse installation. ```sh update_synapse_database --database-config homeserver.yaml --run-background-updates ``` -3. Copy over the necessary tables and sequences from one database to the other. Tables relating to database - migrations, schemas, schema versions and background updates should **not** be copied. +3. Copy over the necessary tables and sequences from one database to the other. Tables relating to database migrations, schemas, schema versions and background updates should **not** be copied. - As an example, say that you'd like to split out the "state" data store from an existing database which - currently contains all data stores. + As an example, say that you'd like to split out the "state" data store from an existing database which currently contains all data stores. - Simply copy the tables and sequences defined above for the "state" datastore from the existing database - to the secondary database. As noted above, additional tables will be created in the secondary database - when Synapse is started. + Simply copy the tables and sequences defined above for the "state" datastore from the existing database to the secondary database. As noted above, additional tables will be created in the secondary database when Synapse is started. 4. Modify/create the `databases` option in your `homeserver.yaml` to match the desired database configuration. 5. Start Synapse. Check that it starts up successfully and that things generally seem to be working. @@ -1575,14 +1472,16 @@ configuration (e.g. the "main" data store on one database, and "state" on anothe Only one of the options `database` or `databases` may be specified in your config, but not both. -Example configuration: +Defaults to `{}`. +Example configuration: ```yaml databases: basement_box: name: psycopg2 txn_limit: 10000 - data_stores: ["main"] + data_stores: + - main args: user: synapse_user password: secretpassword @@ -1591,11 +1490,11 @@ databases: port: 5432 cp_min: 5 cp_max: 10 - my_other_database: name: psycopg2 txn_limit: 10000 - data_stores: ["state"] + data_stores: + - state args: user: synapse_user password: secretpassword @@ -1607,240 +1506,345 @@ databases: ``` --- ## Logging + Config options related to logging. --- ### `log_config` -This option specifies a yaml python logging config file as described -[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema). +*(string|null)* This option specifies a yaml python logging config file as described [here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema). Defaults to `null`. Example configuration: ```yaml -log_config: "CONFDIR/SERVERNAME.log.config" +log_config: CONFDIR/SERVERNAME.log.config ``` --- ## Ratelimiting + Options related to ratelimiting in Synapse. Each ratelimiting configuration is made of two parameters: - - `per_second`: number of requests a client can send per second. - - `burst_count`: number of requests a client can send before being throttled. +- `per_second`: number of requests a client can send per second. +- `burst_count`: number of requests a client can send before being throttled. 
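+As an illustration of how these two parameters interact, here is a minimal token-bucket-style sketch of the behaviour they describe (an approximation for explanation only, not Synapse's actual `Ratelimiter` implementation):
+
+```python
+import time
+
+class SketchRatelimiter:
+    """Allow `burst_count` immediate requests; capacity then refills at `per_second`."""
+
+    def __init__(self, per_second: float, burst_count: float) -> None:
+        self.per_second = per_second
+        self.burst_count = burst_count
+        self.tokens = burst_count  # start with a full burst allowance
+        self.last = time.monotonic()
+
+    def can_do_action(self) -> bool:
+        now = time.monotonic()
+        # Refill tokens for the time elapsed, capped at the burst size.
+        elapsed = now - self.last
+        self.tokens = min(self.burst_count, self.tokens + elapsed * self.per_second)
+        self.last = now
+        if self.tokens >= 1:
+            self.tokens -= 1  # spend one token on this request
+            return True
+        return False  # request is throttled
+```
+
+Under this model, a client can always burst up to `burst_count` requests, and sustained traffic is limited to `per_second` requests per second on average.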
+ --- ### `rc_message` +*(object)* Ratelimiting settings for client messaging. -Ratelimiting settings for client messaging. +This is a ratelimiting option for messages that ratelimits sending based on the account the client is using. -This is a ratelimiting option for messages that ratelimits sending based on the account the client -is using. It defaults to: `per_second: 0.2`, `burst_count: 10`. +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: +```yaml +rc_message: + per_second: 0.2 + burst_count: 10.0 +``` Example configuration: ```yaml rc_message: per_second: 0.5 - burst_count: 15 + burst_count: 15.0 ``` --- ### `rc_registration` -This option ratelimits registration requests based on the client's IP address. -It defaults to `per_second: 0.17`, `burst_count: 3`. +*(object)* This option ratelimits registration requests based on the client's IP address. + +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: +```yaml +rc_registration: + per_second: 0.17 + burst_count: 3.0 +``` Example configuration: ```yaml rc_registration: per_second: 0.15 - burst_count: 2 + burst_count: 2.0 ``` --- ### `rc_registration_token_validity` -This option checks the validity of registration tokens that ratelimits requests based on -the client's IP address. -Defaults to `per_second: 0.1`, `burst_count: 5`. +*(object)* This option ratelimits requests that check the validity of registration tokens, based on the client's IP address. + +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: +```yaml +rc_registration_token_validity: + per_second: 0.1 + burst_count: 5.0 +``` Example configuration: ```yaml rc_registration_token_validity: per_second: 0.3 - burst_count: 6 + burst_count: 6.0 ``` --- ### `rc_login` -This option specifies several limits for login: -* `address` ratelimits login requests based on the client's IP - address. Defaults to `per_second: 0.003`, `burst_count: 5`. +*(object)* This option specifies several limits for login. + +This setting has the following sub-options: + +* `address` (object): Ratelimits login requests based on the client's IP address. Defaults to `{"per_second": 0.003, "burst_count": 5.0}`. -* `account` ratelimits login requests based on the account the - client is attempting to log into. Defaults to `per_second: 0.003`, - `burst_count: 5`. + This setting has the following sub-options: -* `failed_attempts` ratelimits login requests based on the account the - client is attempting to log into, based on the amount of failed login - attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`. + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. + +* `account` (object): Ratelimits login requests based on the account the client is attempting to log into. Defaults to `{"per_second": 0.003, "burst_count": 5.0}`. 
+ + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. + +* `failed_attempts` (object): Ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login attempts for this account. Defaults to `{"per_second": 0.17, "burst_count": 3.0}`. + + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. Example configuration: ```yaml rc_login: address: per_second: 0.15 - burst_count: 5 + burst_count: 5.0 account: per_second: 0.18 - burst_count: 4 + burst_count: 4.0 failed_attempts: per_second: 0.19 - burst_count: 7 + burst_count: 7.0 ``` --- ### `rc_admin_redaction` -This option sets ratelimiting redactions by room admins. If this is not explicitly -set then it uses the same ratelimiting as per `rc_message`. This is useful -to allow room admins to deal with abuse quickly. +*(object)* This option sets ratelimiting for redactions by room admins. If this is not explicitly set then it uses the same ratelimiting as per `rc_message`. This is useful to allow room admins to deal with abuse quickly. + +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. Example configuration: ```yaml rc_admin_redaction: - per_second: 1 - burst_count: 50 + per_second: 1.0 + burst_count: 50.0 ``` --- ### `rc_joins` -This option allows for ratelimiting number of rooms a user can join. This setting has the following sub-options: +*(object)* This option allows for ratelimiting the number of rooms a user can join. -* `local`: ratelimits when users are joining rooms the server is already in. - Defaults to `per_second: 0.1`, `burst_count: 10`. +This setting has the following sub-options: + +* `local` (object): Ratelimits when users are joining rooms the server is already in. Defaults to `{"per_second": 0.1, "burst_count": 10.0}`. + + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. + +* `remote` (object): Ratelimits when users are trying to join rooms not on the server (which can be more computationally expensive than restricting locally). Defaults to `{"per_second": 0.01, "burst_count": 10.0}`. + + This setting has the following sub-options: -* `remote`: ratelimits when users are trying to join rooms not on the server (which - can be more computationally expensive than restricting locally). Defaults to - `per_second: 0.01`, `burst_count: 10` + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. Example configuration: ```yaml rc_joins: local: per_second: 0.2 - burst_count: 15 + burst_count: 15.0 remote: per_second: 0.03 - burst_count: 12 + burst_count: 12.0 ``` --- ### `rc_joins_per_room` -This option allows admins to ratelimit joins to a room based on the number of recent -joins (local or remote) to that room. 
It is intended to mitigate mass-join spam -waves which target multiple homeservers. +*(object)* This option allows admins to ratelimit joins to a room based on the number of recent joins (local or remote) to that room. It is intended to mitigate mass-join spam waves which target multiple homeservers. + +_Added in Synapse 1.64.0._ + +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. -By default, one join is permitted to a room every second, with an accumulating -buffer of up to ten instantaneous joins. +* `burst_count` (number): Maximum number of requests a client can send before being throttled. -Example configuration (default values): +Default configuration: ```yaml rc_joins_per_room: - per_second: 1 - burst_count: 10 + per_second: 1.0 + burst_count: 10.0 ``` -_Added in Synapse 1.64.0._ - +Example configuration: +```yaml +rc_joins_per_room: + per_second: 1.0 + burst_count: 10.0 +``` --- ### `rc_3pid_validation` -This option ratelimits how often a user or IP can attempt to validate a 3PID. -Defaults to `per_second: 0.003`, `burst_count: 5`. +*(object)* This option ratelimits how often a user or IP can attempt to validate a 3PID. + +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: +```yaml +rc_3pid_validation: + per_second: 0.003 + burst_count: 5.0 +``` Example configuration: ```yaml rc_3pid_validation: per_second: 0.003 - burst_count: 5 + burst_count: 5.0 ``` --- ### `rc_invites` -This option sets ratelimiting how often invites can be sent in a room or to a -specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10`, -`per_user` defaults to `per_second: 0.003`, `burst_count: 5`, and `per_issuer` -defaults to `per_second: 0.3`, `burst_count: 10`. +*(object)* This option sets ratelimiting for how often invites can be sent in a room or to a specific user. -Client requests that invite user(s) when [creating a -room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom) -will count against the `rc_invites.per_room` limit, whereas -client requests to [invite a single user to a -room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidinvite) -will count against both the `rc_invites.per_user` and `rc_invites.per_room` limits. +Client requests that invite user(s) when [creating a room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom) will count against the `rc_invites.per_room` limit, whereas client requests to [invite a single user to a room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidinvite) will count against both the `rc_invites.per_user` and `rc_invites.per_room` limits. -Federation requests to invite a user will count against the `rc_invites.per_user` -limit only, as Synapse presumes ratelimiting by room will be done by the sending server. +Federation requests to invite a user will count against the `rc_invites.per_user` limit only, as Synapse presumes ratelimiting by room will be done by the sending server. -The `rc_invites.per_user` limit applies to the *receiver* of the invite, rather than the -sender, meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a single user -cannot *receive* more than a burst of 5 invites at a time. 
+_Changed in version 1.63:_ added the `per_issuer` limit. -In contrast, the `rc_invites.per_issuer` limit applies to the *issuer* of the invite, meaning that a `rc_invite.per_issuer.burst_count` of 5 mandates that single user cannot *send* more than a burst of 5 invites at a time. +This setting has the following sub-options: -_Changed in version 1.63:_ added the `per_issuer` limit. +* `per_room` (object): Applies to the room of the invitation. Defaults to `{"per_second": 0.3, "burst_count": 10.0}`. + + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. + +* `per_user` (object): Applies to the *receiver* of the invite, rather than the sender, meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a single user cannot *receive* more than a burst of 5 invites at a time. Defaults to `{"per_second": 0.003, "burst_count": 5.0}`. + + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. + +* `per_issuer` (object): Applies to the *issuer* of the invite, meaning that a `rc_invite.per_issuer.burst_count` of 5 mandates that a single user cannot *send* more than a burst of 5 invites at a time. Defaults to `{"per_second": 0.3, "burst_count": 10.0}`. + + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. Example configuration: ```yaml rc_invites: per_room: per_second: 0.5 - burst_count: 5 + burst_count: 5.0 per_user: per_second: 0.004 - burst_count: 3 + burst_count: 3.0 per_issuer: per_second: 0.5 - burst_count: 5 + burst_count: 5.0 ``` - --- ### `rc_third_party_invite` -This option ratelimits 3PID invites (i.e. invites sent to a third-party ID -such as an email address or a phone number) based on the account that's -sending the invite. Defaults to `per_second: 0.2`, `burst_count: 10`. +*(object)* This option ratelimits 3PID invites (i.e. invites sent to a third-party ID such as an email address or a phone number) based on the account that's sending the invite. -Example configuration: +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: ```yaml rc_third_party_invite: per_second: 0.2 - burst_count: 10 + burst_count: 10.0 ``` --- ### `rc_media_create` -This option ratelimits creation of MXC URIs via the `/_matrix/media/v1/create` -endpoint based on the account that's creating the media. Defaults to -`per_second: 10`, `burst_count: 50`. +*(object)* This option ratelimits creation of MXC URIs via the `/_matrix/media/v1/create` endpoint based on the account that's creating the media. -Example configuration: +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. + +* `burst_count` (number): Maximum number of requests a client can send before being throttled. 
+ +Default configuration: ```yaml rc_media_create: - per_second: 10 - burst_count: 50 + per_second: 10.0 + burst_count: 50.0 ``` --- ### `rc_federation` -Defines limits on federation requests. +*(object)* Defines limits on federation requests. -The `rc_federation` configuration has the following sub-options: -* `window_size`: window size in milliseconds. Defaults to 1000. -* `sleep_limit`: number of federation requests from a single server in - a window before the server will delay processing the request. Defaults to 10. -* `sleep_delay`: duration in milliseconds to delay processing events - from remote servers by if they go over the sleep limit. Defaults to 500. -* `reject_limit`: maximum number of concurrent federation requests - allowed from a single server. Defaults to 50. -* `concurrent`: number of federation requests to concurrently process - from a single server. Defaults to 3. +This setting has the following sub-options: + +* `window_size` (integer): Window size in milliseconds. Defaults to `1000`. + +* `sleep_limit` (integer): Number of federation requests from a single server in a window before the server will delay processing the request. Defaults to `10`. + +* `sleep_delay` (integer): Duration in milliseconds to delay processing events from remote servers by if they go over the sleep limit. Defaults to `500`. + +* `reject_limit` (integer): Maximum number of concurrent federation requests allowed from a single server. Defaults to `50`. + +* `concurrent` (integer): Number of federation requests to concurrently process from a single server. Defaults to `3`. Example configuration: ```yaml @@ -1852,13 +1856,77 @@ rc_federation: concurrent: 5 ``` --- +### `rc_presence` + +*(object)* This option sets ratelimiting for presence. + +This setting has the following sub-options: + +* `per_user` (object): Sets rate limits on how often a specific user's presence updates are evaluated. Ratelimited presence updates sent via sync are ignored, and no error is returned to the client. This option also sets the rate limit for the [`PUT /_matrix/client/v3/presence/{userId}/status`] endpoint. + + [`PUT /_matrix/client/v3/presence/{userId}/status`]: + <https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3presenceuseridstatus> + + This setting has the following sub-options: + + * `per_second` (number): Maximum number of requests a client can send per second. + + * `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: +```yaml +rc_presence: + per_user: + per_second: 0.1 + burst_count: 1.0 +``` + +Example configuration: +```yaml +rc_presence: + per_user: + per_second: 0.05 + burst_count: 1.0 +``` +--- +### `rc_delayed_event_mgmt` + +*(object)* Ratelimiting settings for delayed event management. + +This is a ratelimiting option that ratelimits attempts to restart, cancel, or view delayed events based on the sending client's account and device ID. + +Attempts to create or send delayed events are ratelimited not by this setting, but by `rc_message`. + +Setting this to a high value allows clients to make delayed event management requests often (such as repeatedly restarting a delayed event with a short timeout, or restarting several different delayed events all at once) without the risk of being ratelimited. + +This setting has the following sub-options: + +* `per_second` (number): Maximum number of requests a client can send per second. 
+ +* `burst_count` (number): Maximum number of requests a client can send before being throttled. + +Default configuration: +```yaml +rc_delayed_event_mgmt: + per_second: 1.0 + burst_count: 5.0 +``` + +Example configuration: +```yaml +rc_delayed_event_mgmt: + per_second: 2.0 + burst_count: 20.0 +``` +--- ### `federation_rr_transactions_per_room_per_second` -Sets outgoing federation transaction frequency for sending read-receipts, -per-room. +*(integer)* Sets outgoing federation transaction frequency for sending read-receipts, per-room. -If we end up trying to send out more read-receipts, they will get buffered up -into fewer transactions. Defaults to 50. +If we end up trying to send out more read-receipts, they will get buffered up into fewer transactions. + +Defaults to `50`. Example configuration: ```yaml @@ -1866,25 +1934,37 @@ federation_rr_transactions_per_room_per_second: 40 ``` --- ## Media Store + Config options related to Synapse's media store. --- ### `enable_authenticated_media` -When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy -unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false -after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but -this will change to true in a future Synapse release. +*(boolean)* When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) – requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note that if the setting is switched to false after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on. + +In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this case-by-case breakdown describes whether media downloads are permitted: + +* `enable_authenticated_media = False`: + * unauthenticated client or homeserver requesting local media: allowed + * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache, or as long as the remote homeserver does not require authentication to retrieve the media +* `enable_authenticated_media = True`: + * unauthenticated client or homeserver requesting local media: allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied. 
+ * unauthenticated client or homeserver requesting remote media: the same as for local media; allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied. + +It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`, will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`. This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media; those older clients or homeservers will not be cut off from media they can already see. + +_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`. + +Defaults to `true`. Example configuration: ```yaml -enable_authenticated_media: true +enable_authenticated_media: false ``` --- ### `enable_media_repo` -Enable the media store service in the Synapse master. Defaults to true. -Set to false if you are using a separate media store worker. +*(boolean)* Enable the media store service in the Synapse master. Set to false if you are using a separate media store worker. Defaults to `true`. Example configuration: ```yaml @@ -1893,18 +1973,16 @@ enable_media_repo: false ``` --- ### `media_store_path` -Directory where uploaded images and attachments are stored. +*(string)* Directory where uploaded images and attachments are stored. Defaults to `"media_store"`. Example configuration: ```yaml -media_store_path: "DATADIR/media_store" +media_store_path: DATADIR/media_store ``` --- ### `max_pending_media_uploads` -How many *pending media uploads* can a given user have? A pending media upload -is a created MXC URI that (a) is not expired (the `unused_expires_at` timestamp -has not passed) and (b) the media has not yet been uploaded for. Defaults to 5. +*(integer)* How many *pending media uploads* can a given user have? A pending media upload is a created MXC URI that (a) is not expired (the `unused_expires_at` timestamp has not passed) and (b) for which the media has not yet been uploaded. Defaults to `5`. Example configuration: ```yaml @@ -1913,42 +1991,51 @@ max_pending_media_uploads: 5 ``` --- ### `unused_expiration_time` -How long to wait in milliseconds before expiring created media IDs. Defaults to -"24h" +*(duration)* How long to wait before expiring created media IDs. Defaults to `"24h"`. Example configuration: ```yaml -unused_expiration_time: "1h" +unused_expiration_time: 1h ``` --- ### `media_storage_providers` -Media storage providers allow media to be stored in different -locations. Defaults to none. Associated sub-options are: -* `module`: type of resource, e.g. `file_system`. -* `store_local`: whether to store newly uploaded local files -* `store_remote`: whether to store newly downloaded local files -* `store_synchronous`: whether to wait for successful storage for local uploads -* `config`: sets a path to the resource through the `directory` option +*(array)* Media storage providers allow media to be stored in different locations. Defaults to `[]`. + +Options for each entry include: + +* `module` (string): Type of resource, e.g. `file_system`. + +* `store_local` (boolean): Whether to store newly uploaded local files. + +* `store_remote` (boolean): Whether to store newly downloaded remote files. 
+ +* `store_synchronous` (boolean): Whether to wait for successful storage for local uploads. + +* `config` (object): Sets a path to the resource through the `directory` option. + + This setting has the following sub-options: + + * `directory` (string): Path to the resource. Example configuration: ```yaml media_storage_providers: - - module: file_system - store_local: false - store_remote: false - store_synchronous: false - config: - directory: /mnt/some/other/directory +- module: file_system + store_local: false + store_remote: false + store_synchronous: false + config: + directory: /mnt/some/other/directory ``` --- ### `max_upload_size` -The largest allowed upload size in bytes. +*(byte size)* The largest allowed upload size in bytes. -If you are using a reverse proxy you may also need to set this value in -your reverse proxy's config. Defaults to 50M. Notably Nginx has a small max body size by default. -See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. +If you are using a reverse proxy you may also need to set this value in your reverse proxy's config. Notably Nginx has a small max body size by default. See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. + +Defaults to `"50M"`. Example configuration: ```yaml @@ -1957,7 +2044,7 @@ max_upload_size: 60M ``` --- ### `max_image_pixels` -Maximum number of pixels that will be thumbnailed. Defaults to 32M. +*(byte size)* Maximum number of pixels that will be thumbnailed. Defaults to `"32M"`. Example configuration: ```yaml @@ -1966,7 +2053,7 @@ max_image_pixels: 35M ``` --- ### `remote_media_download_burst_count` -Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads are penalized - if the bucket is full, ie a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. Defaults to 500MiB. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests. +*(byte size)* Remote media downloads are ratelimited using a [leaky bucket algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given "bucket" is keyed to the IP address of the requester when requesting remote media downloads. This configuration option sets the size of the bucket against which the size in bytes of downloads is penalized – if the bucket is full, i.e. a given number of bytes have already been downloaded, further downloads will be denied until the bucket drains. See also `remote_media_download_per_second` which determines the rate at which the "bucket" is emptied and thus has available space to authorize new requests. Defaults to `"500MiB"`. Example configuration: ```yaml @@ -1975,7 +2062,7 @@ remote_media_download_burst_count: 200M ``` --- ### `remote_media_download_per_second` -Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads - this configuration option determines the rate at which the "bucket" (see above) leaks in bytes per second. 
As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached it's capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. Defaults to 87KiB per second. See also `remote_media_download_burst_count`. +*(byte size)* Works in conjunction with `remote_media_download_burst_count` to ratelimit remote media downloads – this configuration option determines the rate at which the "bucket" (see above) leaks in bytes per second. As requests are made to download remote media, the size of those requests in bytes is added to the bucket, and once the bucket has reached its capacity, no more requests will be allowed until a number of bytes has "drained" from the bucket. This setting determines the rate at which bytes drain from the bucket, with the practical effect that the larger the number, the faster the bucket leaks, allowing for more bytes downloaded over a shorter period of time. See also `remote_media_download_burst_count`. Defaults to `"87KiB"`. Example configuration: ```yaml @@ -1984,36 +2071,24 @@ remote_media_download_per_second: 40K ``` --- ### `prevent_media_downloads_from` -A list of domains to never download media from. Media from these -domains that is already downloaded will not be deleted, but will be -inaccessible to users. This option does not affect admin APIs trying -to download/operate on media. +*(array)* A list of domains to never download media from. Media from these domains that is already downloaded will not be deleted, but will be inaccessible to users. This option does not affect admin APIs trying to download/operate on media. -This will not prevent the listed domains from accessing media themselves. -It simply prevents users on this server from downloading media originating -from the listed servers. +This will not prevent the listed domains from accessing media themselves. It simply prevents users on this server from downloading media originating from the listed servers. -This will have no effect on media originating from the local server. This only -affects media downloaded from other Matrix servers, to control URL previews see -[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or -[`url_preview_url_blacklist`](#url_preview_url_blacklist). +This will have no effect on media originating from the local server. This only affects media downloaded from other Matrix servers; to control URL previews, see [`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or [`url_preview_url_blacklist`](#url_preview_url_blacklist). -Defaults to an empty list (nothing blocked). +Defaults to `[]`. Example configuration: ```yaml prevent_media_downloads_from: - - evil.example.org - - evil2.example.org +- evil.example.org +- evil2.example.org ``` --- ### `dynamic_thumbnails` -Whether to generate new thumbnails on the fly to precisely match -the resolution requested by the client. If true then whenever -a new resolution is requested by the client the server will -generate a new thumbnail. If false the server will pick a thumbnail -from a precalculated list. Defaults to false. +*(boolean)* Whether to generate new thumbnails on the fly to precisely match the resolution requested by the client. 
If true then whenever a new resolution is requested by the client the server will generate a new thumbnail. If false the server will pick a thumbnail from a precalculated list. Defaults to `false`. Example configuration: ```yaml @@ -2022,68 +2097,62 @@ dynamic_thumbnails: true ``` --- ### `thumbnail_sizes` -List of thumbnails to precalculate when an image is uploaded. Associated sub-options are: -* `width` -* `height` -* `method`: i.e. `crop`, `scale`, etc. +*(array)* List of thumbnails to precalculate when an image is uploaded. -Example configuration: +Options for each entry include: + +* `width` (integer): Width of the generated thumbnail. + +* `height` (integer): Height of the generated thumbnail. + +* `method` (string): Method to fit the thumbnail dimensions. Current options are `crop` and `scale`. + +Default configuration: ```yaml thumbnail_sizes: - - width: 32 - height: 32 - method: crop - - width: 96 - height: 96 - method: crop - - width: 320 - height: 240 - method: scale - - width: 640 - height: 480 - method: scale - - width: 800 - height: 600 - method: scale +- width: 32 + height: 32 + method: crop +- width: 96 + height: 96 + method: crop +- width: 320 + height: 240 + method: scale +- width: 640 + height: 480 + method: scale +- width: 800 + height: 600 + method: scale ``` --- ### `media_retention` -Controls whether local media and entries in the remote media cache -(media that is downloaded from other homeservers) should be removed -under certain conditions, typically for the purpose of saving space. +*(object)* Controls whether local media and entries in the remote media cache (media that is downloaded from other homeservers) should be removed under certain conditions, typically for the purpose of saving space. + +Purging media files will be carried out by the media worker (that is, the worker that has the `enable_media_repo` homeserver config option set to `true`). This may be the main process. -Purging media files will be the carried out by the media worker -(that is, the worker that has the `enable_media_repo` homeserver config -option set to 'true'). This may be the main process. +The `media_retention.local_media_lifetime` and `media_retention.remote_media_lifetime` config options control whether media will be purged if it has not been accessed in a given amount of time. Note that media is "accessed" when loaded in a room in a client, or otherwise downloaded by a local or remote user. If the media has never been accessed, the media's creation time is used instead. Both thumbnails and the original media will be removed. If either of these options is unset, then media of that type will not be purged. -The `media_retention.local_media_lifetime` and -`media_retention.remote_media_lifetime` config options control whether -media will be purged if it has not been accessed in a given amount of -time. Note that media is 'accessed' when loaded in a room in a client, or -otherwise downloaded by a local or remote user. If the media has never -been accessed, the media's creation time is used instead. Both thumbnails -and the original media will be removed. If either of these options are unset, -then media of that type will not be purged. +Local or cached remote media that has been [quarantined](../../admin_api/media_admin_api.md#quarantining-media-in-a-room) will not be deleted. Similarly, local media that has been marked as [protected from quarantine](../../admin_api/media_admin_api.md#protecting-media-from-being-quarantined) will not be deleted. 
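+The purge rule described above reduces to a simple time comparison. A minimal sketch, with illustrative field names rather than Synapse's actual schema or code:
+
+```python
+import time
+
+def should_purge(last_access_ts: float | None, created_ts: float, lifetime_s: float | None) -> bool:
+    """Purge media once the configured lifetime has elapsed since its last
+    access, falling back to its creation time if it was never accessed."""
+    if lifetime_s is None:
+        return False  # lifetime option unset: media of this type is never purged
+    reference_ts = last_access_ts if last_access_ts is not None else created_ts
+    return (time.time() - reference_ts) > lifetime_s
+```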
+ +This setting has the following sub-options: -Local or cached remote media that has been -[quarantined](../../admin_api/media_admin_api.md#quarantining-media-in-a-room) -will not be deleted. Similarly, local media that has been marked as -[protected from quarantine](../../admin_api/media_admin_api.md#protecting-media-from-being-quarantined) -will not be deleted. +* `local_media_lifetime`: Duration without access to a local media resource after which it will be purged. If the media has never been accessed, the media's creation time is used instead. Both thumbnails and the original media will be removed. If unset or null, local media will not be purged. Defaults to `null`. + +* `remote_media_lifetime`: Duration without access to a remote media resource after which it will be purged. If the media has never been accessed, the media's creation time is used instead. Both thumbnails and the original media will be removed. If unset or null, remote media will not be purged. Defaults to `null`. Example configuration: ```yaml media_retention: - local_media_lifetime: 90d - remote_media_lifetime: 14d + local_media_lifetime: 90d + remote_media_lifetime: 14d ``` --- ### `url_preview_enabled` -This setting determines whether the preview URL API is enabled. -It is disabled by default. Set to true to enable. If enabled you must specify a -`url_preview_ip_range_blacklist` blacklist. +*(boolean)* This setting determines whether the preview URL API is enabled. Set to true to enable. If enabled you must specify a `url_preview_ip_range_blacklist` blacklist. Defaults to `false`. Example configuration: ```yaml @@ -2092,111 +2161,80 @@ url_preview_enabled: true --- ### `url_preview_ip_range_blacklist` -List of IP address CIDR ranges that the URL preview spider is denied -from accessing. There are no defaults: you must explicitly -specify a list for URL previewing to work. You should specify any -internal services in your network that you do not want synapse to try -to connect to, otherwise anyone in any Matrix room could cause your -synapse to issue arbitrary GET requests to your internal services, -causing serious security issues. +*(array|null)* List of IP address CIDR ranges that the URL preview spider is denied from accessing. There are no defaults: you must explicitly specify a list for URL previewing to work. You should specify any internal services in your network that you do not want synapse to try to connect to, otherwise anyone in any Matrix room could cause your synapse to issue arbitrary GET requests to your internal services, causing serious security issues. -(0.0.0.0 and :: are always blacklisted, whether or not they are explicitly -listed here, since they correspond to unroutable addresses.) +(0.0.0.0 and :: are always blacklisted, whether or not they are explicitly listed here, since they correspond to unroutable addresses.) -This must be specified if `url_preview_enabled` is set. It is recommended that -you use the following example list as a starting point. +This must be specified if `url_preview_enabled` is set. It is recommended that you use the following example list as a starting point. Note: The value is ignored when an HTTP proxy is in use. +Defaults to `null`. 
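+To illustrate the kind of server-side request forgery this blacklist prevents, here is a minimal sketch (using only the standard `ipaddress` and `socket` modules; not Synapse's actual implementation) of the check a URL-preview fetcher performs before connecting:
+
+```python
+import ipaddress
+import socket
+from urllib.parse import urlsplit
+
+# A small sample of the ranges from the example list below.
+BLACKLIST = [ipaddress.ip_network(cidr) for cidr in ("127.0.0.0/8", "10.0.0.0/8", "::1/128")]
+
+def is_blocked(url: str) -> bool:
+    """Resolve the URL's host and refuse it if any resolved address is blacklisted."""
+    host = urlsplit(url).hostname
+    for info in socket.getaddrinfo(host, None):
+        addr = ipaddress.ip_address(info[4][0].split("%")[0])  # strip any IPv6 zone index
+        if any(addr in net for net in BLACKLIST):
+            return True  # would have reached an internal service
+    return False
+```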
+ + Example configuration: ```yaml url_preview_ip_range_blacklist: - - '127.0.0.0/8' - - '10.0.0.0/8' - - '172.16.0.0/12' - - '192.168.0.0/16' - - '100.64.0.0/10' - - '192.0.0.0/24' - - '169.254.0.0/16' - - '192.88.99.0/24' - - '198.18.0.0/15' - - '192.0.2.0/24' - - '198.51.100.0/24' - - '203.0.113.0/24' - - '224.0.0.0/4' - - '::1/128' - - 'fe80::/10' - - 'fc00::/7' - - '2001:db8::/32' - - 'ff00::/8' - - 'fec0::/10' +- 127.0.0.0/8 +- 10.0.0.0/8 +- 172.16.0.0/12 +- 192.168.0.0/16 +- 100.64.0.0/10 +- 192.0.0.0/24 +- 169.254.0.0/16 +- 192.88.99.0/24 +- 198.18.0.0/15 +- 192.0.2.0/24 +- 198.51.100.0/24 +- 203.0.113.0/24 +- 224.0.0.0/4 +- ::1/128 +- fe80::/10 +- fc00::/7 +- 2001:db8::/32 +- ff00::/8 +- fec0::/10 ``` --- ### `url_preview_ip_range_whitelist` -This option sets a list of IP address CIDR ranges that the URL preview spider is allowed -to access even if they are specified in `url_preview_ip_range_blacklist`. -This is useful for specifying exceptions to wide-ranging blacklisted -target IP ranges - e.g. for enabling URL previews for a specific private -website only visible in your network. Defaults to none. +*(array)* This option sets a list of IP address CIDR ranges that the URL preview spider is allowed to access even if they are specified in `url_preview_ip_range_blacklist`. This is useful for specifying exceptions to wide-ranging blacklisted target IP ranges – e.g. for enabling URL previews for a specific private website only visible in your network. Defaults to `[]`. Example configuration: ```yaml url_preview_ip_range_whitelist: - - '192.168.1.1' +- 192.168.1.1 ``` --- ### `url_preview_url_blacklist` -Optional list of URL matches that the URL preview spider is denied from -accessing. This is a usability feature, not a security one. You should use -`url_preview_ip_range_blacklist` in preference to this, otherwise someone could -define a public DNS entry that points to a private IP address and circumvent -the blacklist. Applications that perform redirects or serve different content -when detecting that Synapse is accessing them can also bypass the blacklist. -This is more useful if you know there is an entire shape of URL that you know -that you do not want Synapse to preview. +*(array)* Optional list of URL matches that the URL preview spider is denied from accessing. This is a usability feature, not a security one. You should use `url_preview_ip_range_blacklist` in preference to this, otherwise someone could define a public DNS entry that points to a private IP address and circumvent the blacklist. Applications that perform redirects or serve different content when detecting that Synapse is accessing them can also bypass the blacklist. This is more useful if you know there is an entire shape of URL that you do not want Synapse to preview. -Each list entry is a dictionary of url component attributes as returned -by urlparse.urlsplit as applied to the absolute form of the URL. See -[here](https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit) for more -information. Some examples are: +Each list entry is a dictionary of url component attributes as returned by `urllib.parse.urlsplit` as applied to the absolute form of the URL. See [here](https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlsplit) for more information. 
Some examples are: * `username` * `netloc` * `scheme` * `path` -The values of the dictionary are treated as a filename match pattern -applied to that component of URLs, unless they start with a ^ in which -case they are treated as a regular expression match. If all the -specified component matches for a given list item succeed, the URL is -blacklisted. +The values of the dictionary are treated as a filename match pattern applied to that component of URLs, unless they start with a ^ in which case they are treated as a regular expression match. If all the specified component matches for a given list item succeed, the URL is blacklisted. + +Defaults to `[]`. Example configuration: ```yaml url_preview_url_blacklist: - # blacklist any URL with a username in its URI - - username: '*' - - # blacklist all *.google.com URLs - - netloc: 'google.com' - - netloc: '*.google.com' - - # blacklist all plain HTTP URLs - - scheme: 'http' - - # blacklist http(s)://www.acme.com/foo - - netloc: 'www.acme.com' - path: '/foo' - - # blacklist any URL with a literal IPv4 address - - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' +- username: '*' +- netloc: google.com +- netloc: '*.google.com' +- scheme: http +- netloc: www.acme.com + path: /foo +- netloc: ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ``` --- ### `max_spider_size` -The largest allowed URL preview spidering size in bytes. Defaults to 10M. +*(byte size)* The largest allowed URL preview spidering size in bytes. Defaults to `"10M"`. Example configuration: ```yaml @@ -2205,43 +2243,43 @@ max_spider_size: 8M ``` --- ### `url_preview_accept_language` -A list of values for the Accept-Language HTTP header used when -downloading webpages during URL preview generation. This allows -Synapse to specify the preferred languages that URL previews should -be in when communicating with remote servers. +*(array)* A list of values for the Accept-Language HTTP header used when downloading webpages during URL preview generation. This allows Synapse to specify the preferred languages that URL previews should be in when communicating with remote servers. -Each value is a IETF language tag; a 2-3 letter identifier for a -language, optionally followed by subtags separated by '-', specifying -a country or region variant. +Each value is an IETF language tag; a 2-3 letter identifier for a language, optionally followed by subtags separated by `-`, specifying a country or region variant. -Multiple values can be provided, and a weight can be added to each by -using quality value syntax (;q=). '*' translates to any language. +Multiple values can be provided, and a weight can be added to each by using quality value syntax (;q=). `*` translates to any language. -Defaults to "en". +Default configuration: +```yaml +url_preview_accept_language: +- en +``` Example configuration: ```yaml - url_preview_accept_language: - - 'en-UK' - - 'en-US;q=0.9' - - 'fr;q=0.8' - - '*;q=0.7' +url_preview_accept_language: +- en-UK +- en-US;q=0.9 +- fr;q=0.8 +- '*;q=0.7' ``` --- ### `oembed` -oEmbed allows for easier embedding content from a website. It can be -used for generating URLs previews of services which support it. A default list of oEmbed providers -is included with Synapse. Set `disable_default_providers` to true to disable using -these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each -should be in the form of providers.json). By default this list is empty. +*(object)* oEmbed allows for easier embedding of content from a website. 
It can be used for generating URL previews of services which support it. A default list of oEmbed providers is included with Synapse. + +This setting has the following sub-options: + +* `disable_default_providers` (boolean): Do not use Synapse's default list of oEmbed providers. Defaults to `false`. + +* `additional_providers` (array): Additional files with oEmbed configuration (each should be in the form of providers.json). Defaults to `[]`. Example configuration: ```yaml oembed: disable_default_providers: true additional_providers: - - oembed/my_providers.json + - oembed/my_providers.json ``` --- ## Captcha @@ -2251,33 +2289,30 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. --- ### `recaptcha_public_key` -This homeserver's ReCAPTCHA public key. Must be specified if -[`enable_registration_captcha`](#enable_registration_captcha) is enabled. +*(string|null)* This homeserver's ReCAPTCHA public key. Must be specified if [`enable_registration_captcha`](#enable_registration_captcha) is enabled. Defaults to `null`. Example configuration: ```yaml -recaptcha_public_key: "YOUR_PUBLIC_KEY" +recaptcha_public_key: YOUR_PUBLIC_KEY ``` --- ### `recaptcha_private_key` -This homeserver's ReCAPTCHA private key. Must be specified if -[`enable_registration_captcha`](#enable_registration_captcha) is -enabled. +*(string|null)* This homeserver's ReCAPTCHA private key. Must be specified if [`enable_registration_captcha`](#enable_registration_captcha) is enabled. Defaults to `null`. Example configuration: ```yaml -recaptcha_private_key: "YOUR_PRIVATE_KEY" +recaptcha_private_key: YOUR_PRIVATE_KEY ``` --- ### `enable_registration_captcha` -Set to `true` to require users to complete a CAPTCHA test when registering an account. -Requires a valid ReCaptcha public/private key. -Defaults to `false`. +*(boolean)* Set to `true` to require users to complete a CAPTCHA test when registering an account. Requires a valid ReCaptcha public/private key. Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. +Defaults to `false`. + Example configuration: ```yaml enable_registration_captcha: true @@ -2285,166 +2320,131 @@ enable_registration_captcha: true ``` --- ### `recaptcha_siteverify_api` -The API endpoint to use for verifying `m.login.recaptcha` responses. -Defaults to `https://www.recaptcha.net/recaptcha/api/siteverify`. +*(string)* The API endpoint to use for verifying `m.login.recaptcha` responses. Defaults to `"https://www.recaptcha.net/recaptcha/api/siteverify"`. Example configuration: ```yaml -recaptcha_siteverify_api: "https://my.recaptcha.site" +recaptcha_siteverify_api: https://my.recaptcha.site ``` --- ## TURN + Options related to adding a TURN server to Synapse. --- ### `turn_uris` -The public URIs of the TURN server to give to clients. +*(array)* The public URIs of the TURN server to give to clients. Defaults to `[]`. Example configuration: ```yaml -turn_uris: [turn:example.org] +turn_uris: +- turn:example.org ``` --- ### `turn_shared_secret` -The shared secret used to compute passwords for the TURN server. +*(string|null)* The shared secret used to compute passwords for the TURN server. Defaults to `null`. Example configuration: ```yaml -turn_shared_secret: "YOUR_SHARED_SECRET" +turn_shared_secret: YOUR_SHARED_SECRET ``` --- -### `turn_username` and `turn_password` +### `turn_shared_secret_path` -The Username and password if the TURN server needs them and does not use a token. 
+*(string|null)* An alternative to [`turn_shared_secret`](#turn_shared_secret): allows the shared secret to be specified in an external file. -Example configuration: -```yaml -turn_username: "TURNSERVER_USERNAME" -turn_password: "TURNSERVER_PASSWORD" -``` ---- -### `turn_user_lifetime` +The file should be a plain text file, containing only the shared secret. Synapse reads the shared secret from the given file once at startup. -How long generated TURN credentials last. Defaults to 1h. +_Added in Synapse 1.116.0._ + +Defaults to `null`. Example configuration: ```yaml -turn_user_lifetime: 2h +turn_shared_secret_path: /path/to/secrets/file ``` --- -### `turn_allow_guests` +### `turn_username` -Whether guests should be allowed to use the TURN server. This defaults to true, otherwise -VoIP will be unreliable for guests. However, it does introduce a slight security risk as -it allows users to connect to arbitrary endpoints without having first signed up for a valid account (e.g. by passing a CAPTCHA). +*(string|null)* TURN server username if not using a token. Defaults to `null`. Example configuration: ```yaml -turn_allow_guests: false +turn_username: TURNSERVER_USERNAME ``` --- -## Registration ## - -Registration can be rate-limited using the parameters in the [Ratelimiting](#ratelimiting) section of this manual. - ---- -### `enable_registration` - -Enable registration for new users. Defaults to `false`. - -It is highly recommended that if you enable registration, you set one or more -or the following options, to avoid abuse of your server by "bots": +### `turn_password` - * [`enable_registration_captcha`](#enable_registration_captcha) - * [`registrations_require_3pid`](#registrations_require_3pid) - * [`registration_requires_token`](#registration_requires_token) - -(In order to enable registration without any verification, you must also set -[`enable_registration_without_verification`](#enable_registration_without_verification).) - -Note that even if this setting is disabled, new accounts can still be created -via the admin API if -[`registration_shared_secret`](#registration_shared_secret) is set. +*(string|null)* TURN server password if not using a token. Defaults to `null`. Example configuration: ```yaml -enable_registration: true +turn_password: TURNSERVER_PASSWORD ``` --- -### `enable_registration_without_verification` +### `turn_user_lifetime` -Enable registration without email or captcha verification. Note: this option is *not* recommended, -as registration without verification is a known vector for spam and abuse. Defaults to `false`. Has no effect -unless [`enable_registration`](#enable_registration) is also enabled. +*(duration)* How long generated TURN credentials last. Defaults to `"1h"`. Example configuration: ```yaml -enable_registration_without_verification: true +turn_user_lifetime: 2h ``` --- -### `registrations_require_3pid` - -If this is set, users must provide all of the specified types of [3PID](https://spec.matrix.org/latest/appendices/#3pid-types) when registering an account. +### `turn_allow_guests` -Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. +*(boolean)* Whether guests should be allowed to use the TURN server. If false, VoIP will be unreliable for guests. However, it does introduce a slight security risk as it allows users to connect to arbitrary endpoints without having first signed up for a valid account (e.g. by passing a CAPTCHA). Defaults to `true`. 
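+As an aside on how the TURN options above fit together: TURN servers running in shared-secret mode (for example coturn's `use-auth-secret` setting) expect time-limited credentials derived from the secret via HMAC-SHA1, which is how `turn_shared_secret` and `turn_user_lifetime` combine. A minimal sketch of that widely used ephemeral-credential scheme (an illustration, not Synapse's code):
+
+```python
+import base64
+import hashlib
+import hmac
+import time
+
+def turn_credentials(shared_secret: str, user_id: str, lifetime_s: int = 3600) -> tuple[str, str]:
+    """Derive time-limited TURN credentials: the username embeds an expiry
+    timestamp, and the password is the base64 HMAC-SHA1 of that username."""
+    expiry = int(time.time()) + lifetime_s
+    username = f"{expiry}:{user_id}"
+    mac = hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1)
+    return username, base64.b64encode(mac.digest()).decode()
+```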
Example configuration: ```yaml -registrations_require_3pid: - - email - - msisdn +turn_allow_guests: false ``` --- -### `disable_msisdn_registration` +## Registration -Explicitly disable asking for MSISDNs from the registration -flow (overrides `registrations_require_3pid` if MSISDNs are set as required). +Registration can be rate-limited using the parameters in the [Ratelimiting](#ratelimiting) section of this manual. -Example configuration: -```yaml -disable_msisdn_registration: true -``` --- -### `allowed_local_3pids` +### `enable_registration` -Mandate that users are only allowed to associate certain formats of -3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options. -`pattern` is a [Perl-like regular expression](https://docs.python.org/3/library/re.html#module-re). +*(boolean)* Enable registration for new users. -More information about 3PIDs, allowed `medium` types and their `address` syntax can be found [in the Matrix spec](https://spec.matrix.org/latest/appendices/#3pid-types). +It is highly recommended that if you enable registration, you set one or more of the following options, to avoid abuse of your server by "bots": + +* [`enable_registration_captcha`](#enable_registration_captcha) +* [`registrations_require_3pid`](#registrations_require_3pid) +* [`registration_requires_token`](#registration_requires_token) + +(In order to enable registration without any verification, you must also set [`enable_registration_without_verification`](#enable_registration_without_verification).) + +Note that even if this setting is disabled, new accounts can still be created via the admin API if [`registration_shared_secret`](#registration_shared_secret) is set. + +Defaults to `false`. Example configuration: ```yaml -allowed_local_3pids: - - medium: email - pattern: '^[^@]+@matrix\.org$' - - medium: email - pattern: '^[^@]+@vector\.im$' - - medium: msisdn - pattern: '^44\d{10}$' +enable_registration: true ``` --- -### `enable_3pid_lookup` +### `enable_registration_without_verification` -Enable 3PIDs lookup requests to identity servers from this server. Defaults to true. +*(boolean)* Enable registration without email or captcha verification. Note: this option is *not* recommended, as registration without verification is a known vector for spam and abuse. Has no effect unless [`enable_registration`](#enable_registration) is also enabled. Defaults to `false`. Example configuration: ```yaml -enable_3pid_lookup: false +enable_registration_without_verification: true ``` --- ### `registration_requires_token` -Require users to submit a token during registration. -Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). -Disabling this option will not delete any tokens previously generated. -Defaults to `false`. Set to `true` to enable. - +*(boolean)* Require users to submit a token during registration. Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). Disabling this option will not delete any tokens previously generated. Note that [`enable_registration`](#enable_registration) must also be set to allow account registration. +Defaults to `false`. + Example configuration: ```yaml registration_requires_token: true @@ -2452,47 +2452,44 @@ registration_requires_token: true ``` --- ### `registration_shared_secret` -If set, allows registration of standard or admin accounts by anyone who has the -shared secret, even if [`enable_registration`](#enable_registration) is not -set. 
 ### `registration_shared_secret`
-If set, allows registration of standard or admin accounts by anyone who has the
-shared secret, even if [`enable_registration`](#enable_registration) is not
-set.
+*(string|null)* If set, allows registration of standard or admin accounts by anyone who has the shared secret, even if [`enable_registration`](#enable_registration) is not set.
+
+This is primarily intended for use with the `register_new_matrix_user` script (see [Registering a user](../../setup/installation.md#registering-a-user)); however, the interface is [documented](../../admin_api/register_api.html).
+
+Replacing an existing `registration_shared_secret` with a new one requires users of the [Shared-Secret Registration API](../../admin_api/register_api.html) to start using the new secret for requesting any further one-time nonces.
-This is primarily intended for use with the `register_new_matrix_user` script
-(see [Registering a user](../../setup/installation.md#registering-a-user));
-however, the interface is [documented](../../admin_api/register_api.html).
+> ⚠️ **Warning** – The additional consequences of replacing [`macaroon_secret_key`](#macaroon_secret_key) will apply if it delegates to `registration_shared_secret`.
 See also [`registration_shared_secret_path`](#registration_shared_secret_path).
+Defaults to `null`.
+
 Example configuration:
 ```yaml
 registration_shared_secret: <PRIVATE STRING>
 ```
-
 ---
 ### `registration_shared_secret_path`
-An alternative to [`registration_shared_secret`](#registration_shared_secret):
-allows the shared secret to be specified in an external file.
+*(string|null)* An alternative to [`registration_shared_secret`](#registration_shared_secret): allows the shared secret to be specified in an external file.
 The file should be a plain text file, containing only the shared secret.
-If this file does not exist, Synapse will create a new shared
-secret on startup and store it in this file.
+If this file does not exist, Synapse will create a new shared secret on startup and store it in this file.
+
+_Added in Synapse 1.67.0._
+
+Defaults to `null`.
 Example configuration:
 ```yaml
 registration_shared_secret_path: /path/to/secrets/file
 ```
-
-_Added in Synapse 1.67.0._
-
 ---
 ### `bcrypt_rounds`
-Set the number of bcrypt rounds used to generate password hash.
-Larger numbers increase the work factor needed to generate the hash.
-The default number is 12 (which equates to 2^12 rounds).
-N.B. that increasing this will exponentially increase the time required
-to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
+*(integer)* Set the number of bcrypt rounds used to generate password hashes. Larger numbers increase the work factor needed to generate the hash. The default number is 12 (which equates to 2^12 rounds). N.B. that increasing this will exponentially increase the time required to register or login - e.g. 24 => 2^24 rounds which will take >20 mins. Defaults to `12`.
+
 Example configuration:
 ```yaml
 bcrypt_rounds: 14
@@ -2500,63 +2497,20 @@ bcrypt_rounds: 14
 ---
 ### `allow_guest_access`
-Allows users to register as guests without a password/email/etc, and
-participate in rooms hosted on this server which have been made
-accessible to anonymous users. Defaults to false.
+*(boolean)* Allows users to register as guests without a password/email/etc, and participate in rooms hosted on this server which have been made accessible to anonymous users. Defaults to `false`.
 Example configuration:
 ```yaml
 allow_guest_access: true
 ```
 ---
-### `default_identity_server`
-
-The identity server which we suggest that clients should use when users log
-in on this server.
-
-(By default, no suggestion is made, so it is left up to the client.
-This setting is ignored unless `public_baseurl` is also explicitly set.)
-
-Example configuration:
-```yaml
-default_identity_server: https://matrix.org
-```
----
-### `account_threepid_delegates`
-
-Delegate verification of phone numbers to an identity server.
-
-When a user wishes to add a phone number to their account, we need to verify that they
-actually own that phone number, which requires sending them a text message (SMS).
-Currently Synapse does not support sending those texts itself and instead delegates the
-task to an identity server. The base URI for the identity server to be used is
-specified by the `account_threepid_delegates.msisdn` option.
-
-If this is left unspecified, Synapse will not allow users to add phone numbers to
-their account.
-
-(Servers handling the these requests must answer the `/requestToken` endpoints defined
-by the Matrix Identity Service API
-[specification](https://matrix.org/docs/spec/identity_service/latest).)
-
-*Deprecated in Synapse 1.64.0*: The `email` option is deprecated.
-
-*Removed in Synapse 1.66.0*: The `email` option has been removed.
-If present, Synapse will report a configuration error on startup.
-
-Example configuration:
-```yaml
-account_threepid_delegates:
-    msisdn: http://localhost:8090 # Delegate SMS sending to this local process
-```
----
 ### `enable_set_displayname`
-Whether users are allowed to change their displayname after it has
-been initially set. Useful when provisioning users based on the
-contents of a third-party directory.
+*(boolean)* Whether users are allowed to change their displayname after it has been initially set. Useful when provisioning users based on the contents of a third-party directory.
+
+Does not apply to server administrators.
-Does not apply to server administrators. Defaults to true.
+Defaults to `true`.
 Example configuration:
 ```yaml
@@ -2565,64 +2519,43 @@ enable_set_displayname: false
 ---
 ### `enable_set_avatar_url`
-Whether users are allowed to change their avatar after it has been
-initially set. Useful when provisioning users based on the contents
-of a third-party directory.
+*(boolean)* Whether users are allowed to change their avatar after it has been initially set. Useful when provisioning users based on the contents of a third-party directory.
-Does not apply to server administrators. Defaults to true.
+Does not apply to server administrators.
-Example configuration:
-```yaml
-enable_set_avatar_url: false
-```
----
-### `enable_3pid_changes`
-
-Whether users can change the third-party IDs associated with their accounts
-(email address and msisdn).
-
-Defaults to true.
+Defaults to `true`.
 Example configuration:
 ```yaml
-enable_3pid_changes: false
+enable_set_avatar_url: false
 ```
 ---
 ### `auto_join_rooms`
-Users who register on this homeserver will automatically be joined
-to the rooms listed under this option.
+*(array)* Users who register on this homeserver will automatically be joined to the rooms listed under this option.
+
+By default, any room aliases included in this list will be created as a publicly joinable room when the first user registers for the homeserver. If the room already exists, make certain it is a publicly joinable room, i.e. the join rule of the room must be set to `public`. You can find more options relating to auto-joining rooms below.
-By default, any room aliases included in this list will be created
-as a publicly joinable room when the first user registers for the
-homeserver. If the room already exists, make certain it is a publicly joinable
-room, i.e. the join rule of the room must be set to 'public'. You can find more options
-relating to auto-joining rooms below.
+As Spaces are just rooms under the hood, Space aliases may also be used.
-As Spaces are just rooms under the hood, Space aliases may also be
-used.
+Defaults to `[]`.
 Example configuration:
 ```yaml
 auto_join_rooms:
-  - "#exampleroom:example.com"
-  - "#anotherexampleroom:example.com"
+- '#exampleroom:example.com'
+- '#anotherexampleroom:example.com'
 ```
 ---
 ### `autocreate_auto_join_rooms`
-Where `auto_join_rooms` are specified, setting this flag ensures that
-the rooms exist by creating them when the first user on the
-homeserver registers. This option will not create Spaces.
+*(boolean)* Where `auto_join_rooms` are specified, setting this flag ensures that the rooms exist by creating them when the first user on the homeserver registers. This option will not create Spaces.
-By default the auto-created rooms are publicly joinable from any federated
-server. Use the `autocreate_auto_join_rooms_federated` and
-`autocreate_auto_join_room_preset` settings to customise this behaviour.
+By default the auto-created rooms are publicly joinable from any federated server. Use the `autocreate_auto_join_rooms_federated` and `autocreate_auto_join_room_preset` settings to customise this behaviour.
-Setting to false means that if the rooms are not manually created,
-users cannot be auto-joined since they do not exist.
+Setting to false means that if the rooms are not manually created, users cannot be auto-joined since they do not exist.
-Defaults to true.
+Defaults to `true`.
 Example configuration:
 ```yaml
@@ -2631,15 +2564,13 @@ autocreate_auto_join_rooms: false
 ---
 ### `autocreate_auto_join_rooms_federated`
-Whether the rooms listed in `auto_join_rooms` that are auto-created are available
-via federation. Only has an effect if `autocreate_auto_join_rooms` is true.
+*(boolean)* Whether the rooms listed in `auto_join_rooms` that are auto-created are available via federation. Only has an effect if `autocreate_auto_join_rooms` is true.
+
+Note that whether a room is federated cannot be modified after creation.
-Note that whether a room is federated cannot be modified after
-creation.
+If true, the room will be joinable from other servers. If false, users from other homeservers are prevented from joining these rooms.
-Defaults to true: the room will be joinable from other servers.
-Set to false to prevent users from other homeservers from
-joining these rooms.
+Defaults to `true`.
 Example configuration:
 ```yaml
@@ -2648,25 +2579,18 @@ autocreate_auto_join_rooms_federated: false
 ---
 ### `autocreate_auto_join_room_preset`
-The room preset to use when auto-creating one of `auto_join_rooms`. Only has an
-effect if `autocreate_auto_join_rooms` is true.
+*(string)* The room preset to use when auto-creating one of `auto_join_rooms`. Only has an effect if `autocreate_auto_join_rooms` is true.
 Possible values for this option are:
-* "public_chat": the room is joinable by anyone, including
-  federated servers if `autocreate_auto_join_rooms_federated` is true (the default).
+* "public_chat": the room is joinable by anyone, including federated servers if `autocreate_auto_join_rooms_federated` is true (the default).
 * "private_chat": an invitation is required to join these rooms.
-* "trusted_private_chat": an invitation is required to join this room and the invitee is
-  assigned a power level of 100 upon joining the room.
+* "trusted_private_chat": an invitation is required to join this room and the invitee is assigned a power level of 100 upon joining the room. -Each preset will set up a room in the same manner as if it were provided as the `preset` parameter when -calling the -[`POST /_matrix/client/v3/createRoom`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom) -Client-Server API endpoint. +Each preset will set up a room in the same manner as if it were provided as the `preset` parameter when calling the [`POST /_matrix/client/v3/createRoom`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom) Client-Server API endpoint. -If a value of "private_chat" or "trusted_private_chat" is used then -`auto_join_mxid_localpart` must also be configured. +If a value of "private_chat" or "trusted_private_chat" is used then `auto_join_mxid_localpart` must also be configured. -Defaults to "public_chat". +Defaults to `"public_chat"`. Example configuration: ```yaml @@ -2675,22 +2599,17 @@ autocreate_auto_join_room_preset: private_chat --- ### `auto_join_mxid_localpart` -The local part of the user id which is used to create `auto_join_rooms` if -`autocreate_auto_join_rooms` is true. If this is not provided then the -initial user account that registers will be used to create the rooms. +*(string|null)* The local part of the user id which is used to create `auto_join_rooms` if `autocreate_auto_join_rooms` is true. If this is not provided then the initial user account that registers will be used to create the rooms. + +The user id is also used to invite new users to any auto-join rooms which are set to invite-only. -The user id is also used to invite new users to any auto-join rooms which -are set to invite-only. +It *must* be configured if `autocreate_auto_join_room_preset` is set to "private_chat" or "trusted_private_chat". -It *must* be configured if `autocreate_auto_join_room_preset` is set to -"private_chat" or "trusted_private_chat". +Note that this must be specified in order for new users to be correctly invited to any auto-join rooms which have been set to invite-only (either at the time of creation or subsequently). -Note that this must be specified in order for new users to be correctly -invited to any auto-join rooms which have been set to invite-only (either -at the time of creation or subsequently). +Note that, if the room already exists, this user must be joined and have the appropriate permissions to invite new members. -Note that, if the room already exists, this user must be joined and -have the appropriate permissions to invite new members. +Defaults to `null`. Example configuration: ```yaml @@ -2699,10 +2618,7 @@ auto_join_mxid_localpart: system --- ### `auto_join_rooms_for_guests` -When `auto_join_rooms` is specified, setting this flag to false prevents -guest accounts from being automatically joined to the rooms. - -Defaults to true. +*(boolean)* When `auto_join_rooms` is specified, setting this flag to false prevents guest accounts from being automatically joined to the rooms. Defaults to `true`. Example configuration: ```yaml @@ -2711,31 +2627,36 @@ auto_join_rooms_for_guests: false --- ### `inhibit_user_in_use_error` -Whether to inhibit errors raised when registering a new account if the user ID -already exists. If turned on, requests to `/register/available` will always -show a user ID as available, and Synapse won't raise an error when starting -a registration with a user ID that already exists. 
 ### `inhibit_user_in_use_error`
-Whether to inhibit errors raised when registering a new account if the user ID
-already exists. If turned on, requests to `/register/available` will always
-show a user ID as available, and Synapse won't raise an error when starting
-a registration with a user ID that already exists. However, Synapse will still
-raise an error if the registration completes and the username conflicts.
-
-Defaults to false.
+*(boolean)* Whether to inhibit errors raised when registering a new account if the user ID already exists. If turned on, requests to `/register/available` will always show a user ID as available, and Synapse won't raise an error when starting a registration with a user ID that already exists. However, Synapse will still raise an error if the registration completes and the username conflicts. Defaults to `false`.
 Example configuration:
 ```yaml
 inhibit_user_in_use_error: true
 ```
 ---
+### `allow_underscore_prefixed_registration`
+
+*(boolean)* Whether users are allowed to register with an underscore-prefixed localpart. By default, AppServices use prefixes like `_example` to namespace their associated ghost users. If turned on, this may result in clashes or confusion. Useful when provisioning users from an external identity provider. Defaults to `false`.
+
+Example configuration:
+```yaml
+allow_underscore_prefixed_registration: true
+```
+---
 ## User session management
+
+Config options related to user session management.
+
 ---
 ### `session_lifetime`
-Time that a user's session remains valid for, after they log in.
+*(duration)* Time that a user's session remains valid for, after they log in.
 Note that this is not currently compatible with guest logins.
-Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
-logged in.
+Note also that this is calculated at login time: changes are not applied retrospectively to users who have already logged in.
-By default, this is infinite.
+Defaults to `"infinity"`.
 Example configuration:
 ```yaml
@@ -2744,16 +2665,15 @@ session_lifetime: 24h
 ---
 ### `refreshable_access_token_lifetime`
-Time that an access token remains valid for, if the session is using refresh tokens.
+*(duration)* Time that an access token remains valid for, if the session is using refresh tokens.
 For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).
 Note that this only applies to clients which advertise support for refresh tokens.
-Note also that this is calculated at login time and refresh time: changes are not applied to
-existing sessions until they are refreshed.
+Note also that this is calculated at login time and refresh time: changes are not applied to existing sessions until they are refreshed.
-By default, this is 5 minutes.
+Defaults to `"5m"`.
 Example configuration:
 ```yaml
@@ -2762,15 +2682,11 @@ refreshable_access_token_lifetime: 10m
 ---
 ### `refresh_token_lifetime`
-Time that a refresh token remains valid for (provided that it is not
-exchanged for another one first).
-This option can be used to automatically log-out inactive sessions.
-Please see the manual for more information.
+*(duration)* Time that a refresh token remains valid for (provided that it is not exchanged for another one first). This option can be used to automatically log-out inactive sessions. Please see the manual for more information.
-Note also that this is calculated at login time and refresh time:
-changes are not applied to existing sessions until they are refreshed.
+Note also that this is calculated at login time and refresh time: changes are not applied to existing sessions until they are refreshed.
-By default, this is infinite.
+Defaults to `"infinity"`.
 Example configuration:
 ```yaml
@@ -2779,17 +2695,13 @@ refresh_token_lifetime: 24h
 ---
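As a sketch, the lifetime options above can be combined to expire inactive sessions automatically; the durations are illustrative only:

```yaml
# Cap every login at 30 days, regardless of activity.
session_lifetime: 30d
# Clients that support refresh tokens get short-lived access tokens...
refreshable_access_token_lifetime: 10m
# ...and are logged out after a week without a refresh.
refresh_token_lifetime: 7d
```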
 ### `nonrefreshable_access_token_lifetime`
-Time that an access token remains valid for, if the session is NOT
-using refresh tokens.
+*(duration)* Time that an access token remains valid for, if the session is NOT using refresh tokens.
-Please note that not all clients support refresh tokens, so setting
-this to a short value may be inconvenient for some users who will
-then be logged out frequently.
+Please note that not all clients support refresh tokens, so setting this to a short value may be inconvenient for some users who will then be logged out frequently.
-Note also that this is calculated at login time: changes are not applied
-retrospectively to existing sessions for users that have already logged in.
+Note also that this is calculated at login time: changes are not applied retrospectively to existing sessions for users that have already logged in.
-By default, this is infinite.
+Defaults to `"infinity"`.
 Example configuration:
 ```yaml
@@ -2800,54 +2712,50 @@ nonrefreshable_access_token_lifetime: 24h
 The amount of time to allow a user-interactive authentication session to be active.
-This defaults to 0, meaning the user is queried for their credentials
-before every action, but this can be overridden to allow a single
-validation to be re-used. This weakens the protections afforded by
-the user-interactive authentication process, by allowing for multiple
-(and potentially different) operations to use the same validation session.
+This defaults to 0, meaning the user is queried for their credentials before every action, but this can be overridden to allow a single validation to be re-used. This weakens the protections afforded by the user-interactive authentication process, by allowing for multiple (and potentially different) operations to use the same validation session.
-This is ignored for potentially "dangerous" operations (including
-deactivating an account, modifying an account password, adding a 3PID,
-and minting additional login tokens).
+This is ignored for potentially "dangerous" operations (including deactivating an account, modifying an account password, adding a 3PID, and minting additional login tokens).
 Use the `session_timeout` sub-option here to change the time allowed for credential validation.
+Defaults to `0`.
+
 Example configuration:
 ```yaml
 ui_auth:
-    session_timeout: "15s"
+  session_timeout: 15s
 ```
 ---
 ### `login_via_existing_session`
-Matrix supports the ability of an existing session to mint a login token for
-another client.
+*(object)* Matrix supports the ability of an existing session to mint a login token for another client.
-Synapse disables this by default as it has security ramifications -- a malicious
-client could use the mechanism to spawn more than one session.
+Synapse disables this by default as it has security ramifications – a malicious client could use the mechanism to spawn more than one session.
+
+This setting has the following sub-options:
-The duration of time the generated token is valid for can be configured with the
-`token_timeout` sub-option.
+* `enabled` (boolean): Enable login via existing session. Defaults to `false`.
-User-interactive authentication is required when this is enabled unless the
-`require_ui_auth` sub-option is set to `False`.
+
+* `require_ui_auth` (boolean): Require user-interactive authentication. Defaults to `true`.
+
+* `token_timeout` (duration): Duration of time the generated token is valid. Defaults to `"5m"`.
 Example configuration:
 ```yaml
 login_via_existing_session:
-    enabled: true
-    require_ui_auth: false
-    token_timeout: "5m"
+  enabled: true
+  require_ui_auth: false
+  token_timeout: 5m
 ```
 ---
 ## Metrics
+
 Config options related to metrics.
 ---
 ### `enable_metrics`
-Set to true to enable collection and rendering of performance metrics.
-Defaults to false.
+*(boolean)* Set to true to enable collection and rendering of performance metrics. Defaults to `false`.
 Example configuration:
 ```yaml
@@ -2856,51 +2764,46 @@ enable_metrics: true
 ---
 ### `sentry`
-Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
-with the `dsn` setting.
+*(object)* Use this option to enable sentry integration. Provide the DSN assigned to you by sentry with the `dsn` setting.
+
+An optional `environment` field can be used to specify an environment. This allows for log maintenance based on different environments, ensuring better organization and analysis.
+
+NOTE: While attempts are made to ensure that the logs don't contain any sensitive information, this cannot be guaranteed. By enabling this option the sentry server may therefore receive sensitive information, and it in turn may then disseminate sensitive information through insecure notification channels if so configured.
-  An optional `environment` field can be used to specify an environment. This allows
-  for log maintenance based on different environments, ensuring better organization
-  and analysis..
+This setting has the following sub-options:
+
+* `dsn` (string|null): The DSN assigned by sentry. If unset or null, sentry integration is disabled. Defaults to `null`.
-NOTE: While attempts are made to ensure that the logs don't contain
-any sensitive information, this cannot be guaranteed. By enabling
-this option the sentry server may therefore receive sensitive
-information, and it in turn may then disseminate sensitive information
-through insecure notification channels if so configured.
+* `environment` (string|null): Sentry environment. Defaults to `null`.
 Example configuration:
 ```yaml
 sentry:
-    environment: "production"
-    dsn: "..."
+  environment: production
+  dsn: '...'
 ```
 ---
 ### `metrics_flags`
-Flags to enable Prometheus metrics which are not suitable to be
-enabled by default, either for performance reasons or limited use.
-Currently the only option is `known_servers`, which publishes
-`synapse_federation_known_servers`, a gauge of the number of
-servers this homeserver knows about, including itself. May cause
-performance problems on large homeservers.
+*(object)* Flags to enable Prometheus metrics which are not suitable to be enabled by default, either for performance reasons or limited use. Currently the only option is `known_servers`.
+
+This setting has the following sub-options:
+
+* `known_servers` (boolean): Publishes `synapse_federation_known_servers`, a gauge of the number of servers this homeserver knows about, including itself. May cause performance problems on large homeservers. Defaults to `false`.
 Example configuration:
 ```yaml
 metrics_flags:
-    known_servers: true
+  known_servers: true
 ```
 ---
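A sketch of a monitoring setup combining the options above, with a placeholder Sentry DSN:

```yaml
enable_metrics: true
sentry:
  dsn: 'https://examplekey@sentry.example.com/1'
  environment: production
metrics_flags:
  # Gauge of known servers; can be expensive on large homeservers.
  known_servers: true
```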
 ### `report_stats`
-Whether or not to report homeserver usage statistics. This is originally
-set when generating the config. Set this option to true or false to change the current
-behavior. See
-[Reporting Homeserver Usage Statistics](../administration/monitoring/reporting_homeserver_usage_statistics.md)
-for information on what data is reported.
+*(boolean)* Whether or not to report homeserver usage statistics. This is originally set when generating the config. Set this option to true or false to change the current behavior. See [Reporting Homeserver Usage Statistics](../administration/monitoring/reporting_homeserver_usage_statistics.md) for information on what data is reported.
-Statistics will be reported 5 minutes after Synapse starts, and then every 3 hours
-after that.
+Statistics will be reported 5 minutes after Synapse starts, and then every 3 hours after that.
+
+Defaults to `false`.
 Example configuration:
 ```yaml
@@ -2909,8 +2812,7 @@ report_stats: true
 ---
 ### `report_stats_endpoint`
-The endpoint to report homeserver usage statistics to.
-Defaults to https://matrix.org/report-usage-stats/push
+*(string)* The endpoint to report homeserver usage statistics to. Defaults to `"https://matrix.org/report-usage-stats/push"`.
 Example configuration:
 ```yaml
 report_stats_endpoint: https://example.com/report-usage-stats/push
 ```
 ---
 ## API Configuration
-Config settings related to the client/server API
+
+Config settings related to the client/server API.
 ---
 ### `room_prejoin_state`
-This setting controls the state that is shared with users upon receiving an
-invite to a room, or in reply to a knock on a room. By default, the following
-state events are shared with users:
+*(object)* This setting controls the state that is shared with users upon receiving an invite to a room, or in reply to a knock on a room. By default, the following state events are shared with users:
 - `m.room.join_rules`
 - `m.room.canonical_alias`
@@ -2935,56 +2836,43 @@ state events are shared with users:
 - `m.room.create`
 - `m.room.topic`
-To change the default behavior, use the following sub-options:
-* `disable_default_event_types`: boolean. Set to `true` to disable the above
-  defaults. If this is enabled, only the event types listed in
-  `additional_event_types` are shared. Defaults to `false`.
-* `additional_event_types`: A list of additional state events to include in the
-  events to be shared. By default, this list is empty (so only the default event
-  types are shared).
+*Changed in Synapse 1.74:* admins can filter the events in prejoin state based on their state key.
+
+This setting has the following sub-options:
+
+* `disable_default_event_types` (boolean): Set to `true` to disable the above defaults. If this is enabled, only the event types listed in `additional_event_types` are shared. Defaults to `false`.
+
+* `additional_event_types` (array): A list of additional state events to include in the events to be shared. By default, this list is empty (so only the default event types are shared).
-  Each entry in this list should be either a single string or a list of two
-  strings.
-  * A standalone string `t` represents all events with type `t` (i.e.
-    with no restrictions on state keys).
-  * A pair of strings `[t, s]` represents a single event with type `t` and
-    state key `s`. The same type can appear in two entries with different state
-    keys: in this situation, both state keys are included in prejoin state.
+  Each entry in this list should be either a single string or a list of two strings.
+  * A standalone string `t` represents all events with type `t` (i.e. with no restrictions on state keys).
+  * A pair of strings `[t, s]` represents a single event with type `t` and state key `s`. The same type can appear in two entries with different state keys: in this situation, both state keys are included in prejoin state.
+
+  Defaults to `[]`.
 Example configuration:
 ```yaml
 room_prejoin_state:
-   disable_default_event_types: false
-   additional_event_types:
-      # Share all events of type `org.example.custom.event.typeA`
-      - org.example.custom.event.typeA
-      # Share only events of type `org.example.custom.event.typeB` whose
-      # state_key is "foo"
-      - ["org.example.custom.event.typeB", "foo"]
-      # Share only events of type `org.example.custom.event.typeC` whose
-      # state_key is "bar" or "baz"
-      - ["org.example.custom.event.typeC", "bar"]
-      - ["org.example.custom.event.typeC", "baz"]
+  disable_default_event_types: false
+  additional_event_types:
+  - org.example.custom.event.typeA
+  - - org.example.custom.event.typeB
+    - foo
+  - - org.example.custom.event.typeC
+    - bar
+  - - org.example.custom.event.typeC
+    - baz
 ```
-
-*Changed in Synapse 1.74:* admins can filter the events in prejoin state based
-on their state key.
-
 ---
 ### `track_puppeted_user_ips`
-We record the IP address of clients used to access the API for various
-reasons, including displaying it to the user in the "Where you're signed in"
-dialog.
+*(boolean)* We record the IP address of clients used to access the API for various reasons, including displaying it to the user in the "Where you're signed in" dialog.
+
+By default, when puppeting another user via the admin API, the client IP address is recorded against the user who created the access token (ie, the admin user), and *not* the puppeted user.
-By default, when puppeting another user via the admin API, the client IP
-address is recorded against the user who created the access token (ie, the
-admin user), and *not* the puppeted user.
+Set this option to true to also record the IP address against the puppeted user. (This also means that the puppeted user will count as an "active" user for the purpose of monthly active user tracking – see `limit_usage_by_mau` etc above.)
-Set this option to true to also record the IP address against the puppeted
-user. (This also means that the puppeted user will count as an "active" user
-for the purpose of monthly active user tracking - see `limit_usage_by_mau` etc
-above.)
+Defaults to `false`.
 Example configuration:
 ```yaml
@@ -2993,19 +2881,18 @@ track_puppeted_user_ips: true
 ---
 ### `app_service_config_files`
-A list of application service config files to use.
+*(array)* A list of application service config files to use. Defaults to `[]`.
 Example configuration:
 ```yaml
 app_service_config_files:
-  - app_service_1.yaml
-  - app_service_2.yaml
+- app_service_1.yaml
+- app_service_2.yaml
 ```
 ---
 ### `track_appservice_user_ips`
-Defaults to false. Set to true to enable tracking of application service IP addresses.
-Implicitly enables MAU tracking for application service users.
+*(boolean)* Set to true to enable tracking of application service IP addresses. Implicitly enables MAU tracking for application service users. Defaults to `false`.
 Example configuration:
 ```yaml
@@ -3014,82 +2901,122 @@ track_appservice_user_ips: true
 ---
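As a sketch, the application service options above combine as follows (the registration file path is a placeholder):

```yaml
app_service_config_files:
- /etc/synapse/appservices/irc-bridge.yaml
# Record appservice user IPs; this implicitly enables MAU tracking for them.
track_appservice_user_ips: true
```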
 ### `use_appservice_legacy_authorization`
-Whether to send the application service access tokens via the `access_token` query parameter
-per older versions of the Matrix specification. Defaults to false. Set to true to enable sending
-access tokens via a query parameter.
+*(boolean)* Whether to send the application service access tokens via the `access_token` query parameter per older versions of the Matrix specification. Defaults to false. Set to true to enable sending access tokens via a query parameter.
-**Enabling this option is considered insecure and is not recommended. **
+**Enabling this option is considered insecure and is not recommended.**
+
+Defaults to `false`.
 Example configuration:
 ```yaml
 use_appservice_legacy_authorization: true
 ```
-
 ---
 ### `macaroon_secret_key`
-A secret which is used to sign
+*(string|null)* A secret which is used to sign
 - access token for guest users,
-- short-term login token used during SSO logins (OIDC or SAML2) and
+- short-term login token used during SSO logins (OIDC) and
 - token used for unsubscribing from email notifications.
-If none is specified, the `registration_shared_secret` is used, if one is given;
-otherwise, a secret key is derived from the signing key.
+If none is specified, the `registration_shared_secret` is used, if one is given; otherwise, a secret key is derived from the signing key.
+
+> ⚠️ **Warning** – Replacing an existing `macaroon_secret_key` with a new one will lead to invalidation of access tokens for all guest users. It will also break unsubscribe links in emails sent before the change. An unlucky user might encounter a broken SSO login flow and would have to start again.
+
+Defaults to `null`.
 Example configuration:
 ```yaml
 macaroon_secret_key: <PRIVATE STRING>
 ```
 ---
+### `macaroon_secret_key_path`
+
+*(string|null)* An alternative to [`macaroon_secret_key`](#macaroon_secret_key): allows the secret key to be specified in an external file.
+
+The file should be a plain text file, containing only the secret key. Synapse reads the secret key from the given file once at startup.
+
+_Added in Synapse 1.121.0._
+
+Defaults to `null`.
+
+Example configuration:
+```yaml
+macaroon_secret_key_path: /path/to/secrets/file
+```
+---
 ### `form_secret`
-A secret which is used to calculate HMACs for form values, to stop
-falsification of values. Must be specified for the User Consent
-forms to work.
+*(string|null)* A secret which is used to calculate HMACs for form values, to stop falsification of values. Must be specified for the User Consent forms to work.
+
+Replacing an existing `form_secret` with a new one might break the user consent page for an unlucky user and require them to reopen the page from a new link.
+
+Defaults to `null`.
 Example configuration:
 ```yaml
 form_secret: <PRIVATE STRING>
 ```
 ---
+### `form_secret_path`
+
+*(string|null)* An alternative to [`form_secret`](#form_secret): allows the secret to be specified in an external file.
+
+The file should be a plain text file, containing only the secret. Synapse reads the secret from the given file once at startup.
+
+_Added in Synapse 1.126.0._
+
+Defaults to `null`.
+
+Example configuration:
+```yaml
+form_secret_path: /path/to/secrets/file
+```
+---
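Taken together, the `*_path` variants documented in this section let a deployment keep every such secret out of the config file, for example (paths are placeholders):

```yaml
registration_shared_secret_path: /run/secrets/registration_shared_secret
macaroon_secret_key_path: /run/secrets/macaroon_secret_key
form_secret_path: /run/secrets/form_secret
```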
For each key, `key` should be the base64-encoded public key, and -`expired_ts`should be the time (in milliseconds since the unix epoch) that -it was last used. +*(object)* The keys that the server used to sign messages with but won't use to sign new messages. + +It is possible to build an entry from an old `signing.key` file using the `export_signing_key` script which is provided with synapse. -It is possible to build an entry from an old `signing.key` file using the -`export_signing_key` script which is provided with synapse. +If you have lost the private key file, you can ask another server you trust to tell you the public keys it has seen from your server. To fetch the keys from `matrix.org`, try something like: + +``` +curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com | + jq '.server_keys | map(.verify_keys) | add' +``` + +Defaults to `{}`. Example configuration: ```yaml old_signing_keys: - "ed25519:id": { key: "base64string", expired_ts: 123456789123 } + ed25519:id: + key: base64string + expired_ts: 123456789123 ``` --- ### `key_refresh_interval` -How long key response published by this server is valid for. -Used to set the `valid_until_ts` in `/key/v2` APIs. -Determines how quickly servers will query to check which keys -are still valid. Defaults to 1d. +*(duration)* How long key response published by this server is valid for. Used to set the `valid_until_ts` in `/key/v2` APIs. Determines how quickly servers will query to check which keys are still valid. Defaults to `"1d"`. Example configuration: ```yaml @@ -3098,57 +3025,41 @@ key_refresh_interval: 2d --- ### `trusted_key_servers` -The trusted servers to download signing keys from. +*(array)* The trusted servers to download signing keys from. When we need to fetch a signing key, each server is tried in parallel. -Normally, the connection to the key server is validated via TLS certificates. -Additional security can be provided by configuring a `verify key`, which -will make synapse check that the response is signed by that key. - -This setting supersedes an older setting named `perspectives`. The old format -is still supported for backwards-compatibility, but it is deprecated. - -`trusted_key_servers` defaults to matrix.org, but using it will generate a -warning on start-up. To suppress this warning, set -`suppress_key_server_warning` to true. - -If the use of a trusted key server has to be deactivated, e.g. in a private -federation or for privacy reasons, this can be realised by setting -an empty array (`trusted_key_servers: []`). Then Synapse will request the keys -directly from the server that owns the keys. If Synapse does not get keys directly -from the server, the events of this server will be rejected. - -Options for each entry in the list include: -* `server_name`: the name of the server. Required. -* `verify_keys`: an optional map from key id to base64-encoded public key. - If specified, we will check that the response is signed by at least - one of the given keys. -* `accept_keys_insecurely`: a boolean. Normally, if `verify_keys` is unset, - and `federation_verify_certificates` is not `true`, synapse will refuse - to start, because this would allow anyone who can spoof DNS responses - to masquerade as the trusted key server. If you know what you are doing - and are sure that your network environment provides a secure connection - to the key server, you can set this to `true` to override this behaviour. 
-
-Example configuration #1:
+Normally, the connection to the key server is validated via TLS certificates. Additional security can be provided by configuring a `verify key`, which will make synapse check that the response is signed by that key.
+
+This setting supersedes an older setting named `perspectives`. The old format is still supported for backwards-compatibility, but it is deprecated.
+
+`trusted_key_servers` defaults to matrix.org, but using it will generate a warning on start-up. To suppress this warning, set `suppress_key_server_warning` to true.
+
+If the use of a trusted key server has to be deactivated, e.g. in a private federation or for privacy reasons, this can be realised by setting an empty array (`trusted_key_servers: []`). Then Synapse will request the keys directly from the server that owns the keys. If Synapse does not get keys directly from the server, the events of this server will be rejected.
+
+Default configuration:
 ```yaml
 trusted_key_servers:
-  - server_name: "my_trusted_server.example.com"
-    verify_keys:
-      "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
-  - server_name: "my_other_trusted_server.example.com"
+- server_name: matrix.org
 ```
-Example configuration #2:
+
+Example configurations:
 ```yaml
 trusted_key_servers:
-  - server_name: "matrix.org"
+- server_name: my_trusted_server.example.com
+  verify_keys:
+    ed25519:auto: abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr
+- server_name: my_other_trusted_server.example.com
+```
+
+```yaml
+trusted_key_servers:
+- server_name: matrix.org
 ```
 ---
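For the private-federation case described above, disabling the trusted key server entirely is simply the empty-array form:

```yaml
# Fetch signing keys directly from each origin server instead of via matrix.org.
trusted_key_servers: []
```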
 ### `suppress_key_server_warning`
-Set the following to true to disable the warning that is emitted when the
-`trusted_key_servers` include 'matrix.org'. See above.
+*(boolean)* Set the following to true to disable the warning that is emitted when the `trusted_key_servers` include "matrix.org". See above. Defaults to `false`.
 Example configuration:
 ```yaml
@@ -3157,636 +3068,342 @@ suppress_key_server_warning: true
 ---
 ### `key_server_signing_keys_path`
-The signing keys to use when acting as a trusted key server. If not specified
-defaults to the server signing key.
+*(string|null)* The signing keys to use when acting as a trusted key server. If not specified defaults to the server signing key.
 Can contain multiple keys, one per line.
+Defaults to `null`.
+
 Example configuration:
 ```yaml
-key_server_signing_keys_path: "key_server_signing_keys.key"
+key_server_signing_keys_path: key_server_signing_keys.key
 ```
 ---
 ## Single sign-on integration
-The following settings can be used to make Synapse use a single sign-on
-provider for authentication, instead of its internal password database.
+The following settings can be used to make Synapse use a single sign-on provider for authentication, instead of its internal password database.
-You will probably also want to set the following options to `false` to
-disable the regular login/registration flows:
-  * [`enable_registration`](#enable_registration)
-  * [`password_config.enabled`](#password_config)
+You will probably also want to set the following options to `false` to disable the regular login/registration flows:
+* [`enable_registration`](#enable_registration)
+* [`password_config.enabled`](#password_config)
 ---
-### `saml2_config`
+### `oidc_providers`
-Enable SAML2 for registration and login. Uses pysaml2. To learn more about pysaml and
-to find a full list options for configuring pysaml, read the docs [here](https://pysaml2.readthedocs.io/en/latest/).
+*(array)* List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration and login. See [here](../../openid.md) for information on how to configure these options.
-At least one of `sp_config` or `config_path` must be set in this section to
-enable SAML login. You can either put your entire pysaml config inline using the `sp_config`
-option, or you can specify a path to a psyaml config file with the sub-option `config_path`.
-This setting has the following sub-options:
+For backwards compatibility, it is also possible to configure a single OIDC provider via an `oidc_config` setting. This is now deprecated and admins are advised to migrate to the `oidc_providers` format. (When doing that migration, use `oidc` for the `idp_id` to ensure that existing users continue to be recognised.)
-* `idp_name`: A user-facing name for this identity provider, which is used to
-  offer the user a choice of login mechanisms.
-* `idp_icon`: An optional icon for this identity provider, which is presented
-  by clients and Synapse's own IdP picker page. If given, must be an
-  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
-  obtain such an MXC URI is to upload an image to an (unencrypted) room
-  and then copy the "url" from the source of the event.)
-* `idp_brand`: An optional brand for this identity provider, allowing clients
-  to style the login flow according to the identity provider in question.
-  See the [spec](https://spec.matrix.org/latest/) for possible options here.
-* `sp_config`: the configuration for the pysaml2 Service Provider. See pysaml2 docs for format of config.
-  Default values will be used for the `entityid` and `service` settings,
-  so it is not normally necessary to specify them unless you need to
-  override them. Here are a few useful sub-options for configuring pysaml:
-  * `metadata`: Point this to the IdP's metadata. You must provide either a local
-    file via the `local` attribute or (preferably) a URL via the
-    `remote` attribute.
-  * `accepted_time_diff: 3`: Allowed clock difference in seconds between the homeserver and IdP.
-    Defaults to 0.
-  * `service`: By default, the user has to go to our login page first. If you'd like
-    to allow IdP-initiated login, set `allow_unsolicited` to true under `sp` in the `service`
-    section.
-* `config_path`: specify a separate pysaml2 configuration file thusly:
-  `config_path: "CONFDIR/sp_conf.py"`
-* `saml_session_lifetime`: The lifetime of a SAML session. This defines how long a user has to
-  complete the authentication process, if `allow_unsolicited` is unset. The default is 15 minutes.
-* `user_mapping_provider`: Using this option, an external module can be provided as a
-  custom solution to mapping attributes returned from a saml provider onto a matrix user. The
-  `user_mapping_provider` has the following attributes:
-  * `module`: The custom module's class.
-  * `config`: Custom configuration values for the module. Use the values provided in the
-    example if you are using the built-in user_mapping_provider, or provide your own
-    config values for a custom class if you are using one. This section will be passed as a Python
-    dictionary to the module's `parse_config` method. The built-in provider takes the following two
-    options:
-    * `mxid_source_attribute`: The SAML attribute (after mapping via the attribute maps) to use
-      to derive the Matrix ID from. It is 'uid' by default. Note: This used to be configured by the
-      `saml2_config.mxid_source_attribute option`. If that is still defined, its value will be used instead.
-    * `mxid_mapping`: The mapping system to use for mapping the saml attribute onto a
-      matrix ID. Options include: `hexencode` (which maps unpermitted characters to '=xx')
-      and `dotreplace` (which replaces unpermitted characters with '.').
-      The default is `hexencode`. Note: This used to be configured by the
-      `saml2_config.mxid_mapping option`. If that is still defined, its value will be used instead.
-* `grandfathered_mxid_source_attribute`: In previous versions of synapse, the mapping from SAML attribute to
-  MXID was always calculated dynamically rather than stored in a table. For backwards- compatibility, we will look for `user_ids`
-  matching such a pattern before creating a new account. This setting controls the SAML attribute which will be used for this
-  backwards-compatibility lookup. Typically it should be 'uid', but if the attribute maps are changed, it may be necessary to change it.
-  The default is 'uid'.
-* `attribute_requirements`: It is possible to configure Synapse to only allow logins if SAML attributes
-  match particular values. The requirements can be listed under
-  `attribute_requirements` as shown in the example. All of the listed attributes must
-  match for the login to be permitted.
-* `idp_entityid`: If the metadata XML contains multiple IdP entities then the `idp_entityid`
-  option must be set to the entity to redirect users to.
-  Most deployments only have a single IdP entity and so should omit this option.
-
-
-Once SAML support is enabled, a metadata file will be exposed at
-`https://<server>:<port>/_synapse/client/saml2/metadata.xml`, which you may be able to
-use to configure your SAML IdP with. Alternatively, you can manually configure
-the IdP to use an ACS location of
-`https://<server>:<port>/_synapse/client/saml2/authn_response`.
-
-Example configuration:
-```yaml
-saml2_config:
-  sp_config:
-    metadata:
-      local: ["saml2/idp.xml"]
-      remote:
-        - url: https://our_idp/metadata.xml
-    accepted_time_diff: 3
-
-    service:
-      sp:
-        allow_unsolicited: true
-
-    # The examples below are just used to generate our metadata xml, and you
-    # may well not need them, depending on your setup. Alternatively you
-    # may need a whole lot more detail - see the pysaml2 docs!
-    description: ["My awesome SP", "en"]
-    name: ["Test SP", "en"]
-
-    ui_info:
-      display_name:
-        - lang: en
-          text: "Display Name is the descriptive name of your service."
-      description:
-        - lang: en
-          text: "Description should be a short paragraph explaining the purpose of the service."
-      information_url:
-        - lang: en
-          text: "https://example.com/terms-of-service"
-      privacy_statement_url:
-        - lang: en
-          text: "https://example.com/privacy-policy"
-      keywords:
-        - lang: en
-          text: ["Matrix", "Element"]
-      logo:
-        - lang: en
-          text: "https://example.com/logo.svg"
-          width: "200"
-          height: "80"
-
-    organization:
-      name: Example com
-      display_name:
-        - ["Example co", "en"]
-      url: "http://example.com"
-
-    contact_person:
-      - given_name: Bob
-        sur_name: "the Sysadmin"
-        email_address": ["admin@example.com"]
-        contact_type": technical
-
-  saml_session_lifetime: 5m
+It is possible to configure Synapse to only allow logins if certain attributes match particular values in the OIDC userinfo. The requirements can be listed under `attribute_requirements` as shown here:
+```yaml
+attribute_requirements:
+  - attribute: family_name
+    one_of: ["Stephensson", "Smith"]
+  - attribute: groups
+    value: "admin"
+    # If `value` or `one_of` are not specified, the attribute only needs
+    # to exist, regardless of value.
+  - attribute: picture
+```
-  user_mapping_provider:
-    # Below options are intended for the built-in provider, they should be
-    # changed if using a custom module.
-    config:
-      mxid_source_attribute: displayName
-      mxid_mapping: dotreplace
+
+`attribute` is a required field, while `value` and `one_of` are optional.
-
-  grandfathered_mxid_source_attribute: upn
+
+All of the listed attributes must match for the login to be permitted. Additional attributes can be added to userinfo by expanding the `scopes` section of the OIDC config to retrieve additional information from the OIDC provider.
-
-  attribute_requirements:
-    - attribute: userGroup
-      value: "staff"
-    - attribute: department
-      value: "sales"
+
+If the OIDC claim is a list, then the attribute must match any value in the list. Otherwise, it must exactly match the value of the claim. Using the example above, the `family_name` claim MUST be either "Stephensson" or "Smith", but the `groups` claim MUST contain "admin".
-
-  idp_entityid: 'https://our_idp/entityid'
-```
----
-### `oidc_providers`
+Defaults to `[]`.
-List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
-and login. See [here](../../openid.md)
-for information on how to configure these options.
+Options for each entry include:
-For backwards compatibility, it is also possible to configure a single OIDC
-provider via an `oidc_config` setting. This is now deprecated and admins are
-advised to migrate to the `oidc_providers` format. (When doing that migration,
-use `oidc` for the `idp_id` to ensure that existing users continue to be
-recognised.)
+* `idp_id` (string): A unique identifier for this identity provider. Used internally by Synapse; should be a single word such as "github". Note that, if this is changed, users authenticating via that provider will no longer be recognised as the same user! (Use "oidc" here if you are migrating from an old `oidc_config` configuration.)
-Options for each entry include:
-* `idp_id`: a unique identifier for this identity provider. Used internally
-  by Synapse; should be a single word such as 'github'.
-  Note that, if this is changed, users authenticating via that provider
-  will no longer be recognised as the same user!
-  (Use "oidc" here if you are migrating from an old `oidc_config` configuration.)
+* `idp_name` (string): A user-facing name for this identity provider, which is used to offer the user a choice of login mechanisms.
-* `idp_name`: A user-facing name for this identity provider, which is used to
-  offer the user a choice of login mechanisms.
+* `idp_icon` (string): An optional icon for this identity provider, which is presented by clients and Synapse's own IdP picker page. If given, must be an MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to obtain such an MXC URI is to upload an image to an (unencrypted) room and then copy the URL from the source of the event.)
-* `idp_icon`: An optional icon for this identity provider, which is presented
-  by clients and Synapse's own IdP picker page. If given, must be an
-  MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
-  obtain such an MXC URI is to upload an image to an (unencrypted) room
-  and then copy the "url" from the source of the event.)
+* `idp_brand` (string): An optional brand for this identity provider, allowing clients to style the login flow according to the identity provider in question. See the [spec](https://spec.matrix.org/latest/) for possible options here.
-* `idp_brand`: An optional brand for this identity provider, allowing clients
-  to style the login flow according to the identity provider in question.
-  See the [spec](https://spec.matrix.org/latest/) for possible options here.
+* `discover` (boolean): Set to false to disable the use of the OIDC discovery mechanism to discover endpoints. Defaults to true.
-* `discover`: set to false to disable the use of the OIDC discovery mechanism
-  to discover endpoints. Defaults to true.
+* `issuer` (string): Required. The OIDC issuer. Used to validate tokens and (if discovery is enabled) to discover the provider's endpoints.
-* `issuer`: Required. The OIDC issuer. Used to validate tokens and (if discovery
-  is enabled) to discover the provider's endpoints.
+* `client_id` (string): Required. OAuth2 client id to use.
-* `client_id`: Required. oauth2 client id to use.
+* `client_secret` (string|null): OAuth2 client secret to use. May be omitted if `client_secret_jwt_key` is given, or if `client_auth_method` is `none`. Must be omitted if `client_secret_path` is specified.
-* `client_secret`: oauth2 client secret to use. May be omitted if
-  `client_secret_jwt_key` is given, or if `client_auth_method` is 'none'.
-  Must be omitted if `client_secret_path` is specified.
+* `client_secret_path` (string|null): Path to the OAuth2 client secret to use. With that it's not necessary to leak secrets into the config file itself. Mutually exclusive with `client_secret`. Can be omitted if `client_secret_jwt_key` is specified.
-* `client_secret_path`: path to the oauth2 client secret to use. With that
-  it's not necessary to leak secrets into the config file itself.
-  Mutually exclusive with `client_secret`. Can be omitted if
-  `client_secret_jwt_key` is specified.
+  *Added in Synapse 1.91.0.*
-  *Added in Synapse 1.91.0.*
+* `client_secret_jwt_key` (object|null): Alternative to client_secret: details of a key used to create a JSON Web Token to be used as an OAuth2 client secret.
-* `client_secret_jwt_key`: Alternative to client_secret: details of a key used
-  to create a JSON Web Token to be used as an OAuth2 client secret. If
-  given, must be a dictionary with the following properties:
+  This setting has the following sub-options:
-  * `key`: a pem-encoded signing key. Must be a suitable key for the
-    algorithm specified. Required unless `key_file` is given.
+  * `key` (string|null): A pem-encoded signing key. Must be a suitable key for the algorithm specified. Required unless `key_file` is given.
-  * `key_file`: the path to file containing a pem-encoded signing key file.
-    Required unless `key` is given.
+  * `key_file` (string|null): Path to the file containing a pem-encoded signing key. Required unless `key` is given.
-  * `jwt_header`: a dictionary giving properties to include in the JWT
-    header. Must include the key `alg`, giving the algorithm used to
-    sign the JWT, such as "ES256", using the JWA identifiers in
-    RFC7518.
+  * `jwt_header` (object): Dictionary giving properties to include in the JWT header. Must include the key `alg`.
-  * `jwt_payload`: an optional dictionary giving properties to include in
-    the JWT payload. Normally this should include an `iss` key.
+    This setting has the following sub-options:
-* `client_auth_method`: auth method to use when exchanging the token. Valid
-  values are `client_secret_basic` (default), `client_secret_post` and
-  `none`.
+    * `alg` (string): Algorithm used to sign the JWT, such as ES256, using the JWA identifiers in RFC7518.
-* `pkce_method`: Whether to use proof key for code exchange when requesting
-  and exchanging the token. Valid values are: `auto`, `always`, or `never`. Defaults
-  to `auto`, which uses PKCE if supported during metadata discovery. Set to `always`
-  to force enable PKCE or `never` to force disable PKCE.
+  * `jwt_payload` (object): Optional dictionary giving properties to include in the JWT payload. Normally this should include an `iss` key.
-* `scopes`: list of scopes to request. This should normally include the "openid"
-  scope. Defaults to `["openid"]`.
+* `client_auth_method` (string|null): Auth method to use when exchanging the token. Valid values are `client_secret_basic` (default), `client_secret_post` and `none`.
-* `authorization_endpoint`: the oauth2 authorization endpoint. Required if
-  provider discovery is disabled.
+* `pkce_method` (string|null): Whether to use proof key for code exchange when requesting and exchanging the token. Valid values are: `auto`, `always`, or `never`. Defaults to `auto`, which uses PKCE if supported during metadata discovery. Set to `always` to force enable PKCE or `never` to force disable PKCE.
-* `token_endpoint`: the oauth2 token endpoint. Required if provider discovery is
-  disabled.
+* `id_token_signing_alg_values_supported` (array): List of the JWS signing algorithms (`alg` values) that are supported for signing the `id_token`.
-* `userinfo_endpoint`: the OIDC userinfo endpoint. Required if discovery is
-  disabled and the 'openid' scope is not requested.
+  This is *not* required if `discovery` is disabled. We default to supporting `RS256` in the downstream usage if no algorithms are configured here or in the discovery document.
-* `jwks_uri`: URI where to fetch the JWKS. Required if discovery is disabled and
-  the 'openid' scope is used.
+  According to the spec, the algorithm `"RS256"` MUST be included. The absolutely rigid approach would be to reject this provider as non-compliant if it's not included, but we simply allow whatever and see what happens (you're the one who configured the value and is cooperating with the identity provider).
-* `skip_verification`: set to 'true' to skip metadata verification. Use this if
-  you are connecting to a provider that is not OpenID Connect compliant.
-  Defaults to false. Avoid this in production.
+  The `alg` value `"none"` MAY be supported but can only be used if the Authorization Endpoint does not include `id_token` in the `response_type` (ex. `/authorize?response_type=code` where `none` can apply, `/authorize?response_type=code%20id_token` where `none` can't apply) (such as when using the Authorization Code Flow).
-* `user_profile_method`: Whether to fetch the user profile from the userinfo
-  endpoint, or to rely on the data returned in the id_token from the `token_endpoint`.
-  Valid values are: `auto` or `userinfo_endpoint`.
-  Defaults to `auto`, which uses the userinfo endpoint if `openid` is
-  not included in `scopes`. Set to `userinfo_endpoint` to always use the
-  userinfo endpoint.
+* `scopes` (array|null): List of scopes to request. This should normally include the "openid" scope. Defaults to `["openid"]`.
-* `additional_authorization_parameters`: String to string dictionary that will be passed as - additional parameters to the authorization grant URL. +* `authorization_endpoint` (string): The OAuth2 authorization endpoint. Required if provider discovery is disabled. -* `allow_existing_users`: set to true to allow a user logging in via OIDC to - match a pre-existing account instead of failing. This could be used if - switching from password logins to OIDC. Defaults to false. +* `token_endpoint` (string): The OAuth2 token endpoint. Required if provider discovery is disabled. -* `enable_registration`: set to 'false' to disable automatic registration of new - users. This allows the OIDC SSO flow to be limited to sign in only, rather than - automatically registering users that have a valid SSO login but do not have - a pre-registered account. Defaults to true. +* `userinfo_endpoint` (string): The OIDC userinfo endpoint. Required if discovery is disabled and the "openid" scope is not requested. -* `user_mapping_provider`: Configuration for how attributes returned from a OIDC - provider are mapped onto a matrix user. This setting has the following - sub-properties: +* `jwks_uri` (string): URI from which to fetch the JWKS. Required if discovery is disabled and the "openid" scope is used. - * `module`: The class name of a custom mapping module. Default is - `synapse.handlers.oidc.JinjaOidcMappingProvider`. - See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers) - for information on implementing a custom mapping provider. +* `skip_verification` (boolean): Set to `true` to skip metadata verification. Use this if you are connecting to a provider that is not OpenID Connect compliant. Defaults to false. Avoid this in production. - * `config`: Configuration for the mapping provider module. This section will - be passed as a Python dictionary to the user mapping provider - module's `parse_config` method. +* `user_profile_method` (string|null): Whether to fetch the user profile from the userinfo endpoint, or to rely on the data returned in the id_token from the `token_endpoint`. Valid values are: `auto` or `userinfo_endpoint`. Defaults to `auto`, which uses the userinfo endpoint if `openid` is not included in `scopes`. Set to `userinfo_endpoint` to always use the userinfo endpoint. - For the default provider, the following settings are available: +* `redirect_uri` (string|null): An optional string that, if set, will override the `redirect_uri` parameter sent in the requests to the authorization and token endpoints. Useful if you want to redirect the client to another endpoint as part of the OIDC login. Be aware that the client must then call Synapse's OIDC callback URL (`<public_baseurl>/_synapse/client/oidc/callback`) manually afterwards. Must be a valid URL including scheme and path. - * `subject_template`: Jinja2 template for a unique identifier for the user. - Defaults to `{{ user.sub }}`, which OpenID Connect compliant providers should provide. +* `additional_authorization_parameters` (object): String to string dictionary that will be passed as additional parameters to the authorization grant URL. - This replaces and overrides `subject_claim`. +* `passthrough_authorization_parameters` (array): List of parameters that will be passed through from the redirect endpoint to the authorization grant URL. - * `subject_claim`: name of the claim containing a unique identifier - for the user. Defaults to 'sub', which OpenID Connect - compliant providers should provide.
+* `allow_existing_users` (boolean): Set to true to allow a user logging in via OIDC to match a pre-existing account instead of failing. This could be used if switching from password logins to OIDC. Defaults to false. - *Deprecated in Synapse v1.75.0.* +* `enable_registration` (boolean): Set to `false` to disable automatic registration of new users. This allows the OIDC SSO flow to be limited to sign in only, rather than automatically registering users that have a valid SSO login but do not have a pre-registered account. Defaults to true. - * `picture_template`: Jinja2 template for an url for the user's profile picture. - Defaults to `{{ user.picture }}`, which OpenID Connect compliant providers should - provide and has to refer to a direct image file such as PNG, JPEG, or GIF image file. +* `user_mapping_provider` (object): Configuration for how attributes returned from an OIDC provider are mapped onto a Matrix user. - This replaces and overrides `picture_claim`. + When rendering, the Jinja2 templates are given a `user` variable, which is set to the claims returned by the UserInfo Endpoint and/or in the ID Token. - Currently only supported in monolithic (single-process) server configurations - where the media repository runs within the Synapse process. + This setting has the following sub-options: - * `picture_claim`: name of the claim containing an url for the user's profile picture. - Defaults to 'picture', which OpenID Connect compliant providers should provide - and has to refer to a direct image file such as PNG, JPEG, or GIF image file. + * `module` (string): The class name of a custom mapping module. Default is `synapse.handlers.oidc.JinjaOidcMappingProvider`. See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers) for information on implementing a custom mapping provider. - Currently only supported in monolithic (single-process) server configurations - where the media repository runs within the Synapse process. + * `config` (object): Configuration for the mapping provider module. This section will be passed as a Python dictionary to the user mapping provider module's `parse_config` method. - *Deprecated in Synapse v1.75.0.* + For the default provider, the following settings are available: - * `localpart_template`: Jinja2 template for the localpart of the MXID. - If this is not set, the user will be prompted to choose their - own username (see the documentation for the `sso_auth_account_details.html` - template). This template can use the `localpart_from_email` filter. + * `subject_template`: Jinja2 template for a unique identifier for the user. Defaults to `{{ user.sub }}`, which OpenID Connect compliant providers should provide. - * `confirm_localpart`: Whether to prompt the user to validate (or - change) the generated localpart (see the documentation for the - 'sso_auth_account_details.html' template), instead of - registering the account right away. + This replaces and overrides `subject_claim`. - * `display_name_template`: Jinja2 template for the display name to set - on first login. If unset, no displayname will be set. + * `subject_claim`: Name of the claim containing a unique identifier for the user. Defaults to `sub`, which OpenID Connect compliant providers should provide. - * `email_template`: Jinja2 template for the email address of the user. - If unset, no email address will be added to the account.
+ *Deprecated in Synapse v1.75.0.* - * `extra_attributes`: a map of Jinja2 templates for extra attributes - to send back to the client during login. Note that these are non-standard and clients will ignore them - without modifications. + * `picture_template`: Jinja2 template for a URL for the user's profile picture. Defaults to `{{ user.picture }}`, which OpenID Connect compliant providers should provide; it has to refer to a direct image file such as a PNG, JPEG, or GIF file. - When rendering, the Jinja2 templates are given a 'user' variable, - which is set to the claims returned by the UserInfo Endpoint and/or - in the ID Token. + This replaces and overrides `picture_claim`. -* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications. - Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`. - Defaults to `false`. + Currently only supported in monolithic (single-process) server configurations where the media repository runs within the Synapse process. -* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the - `sub` claim matches the subject claim received during login. This check can be disabled by setting - this to `true`. Defaults to `false`. + * `picture_claim`: Name of the claim containing a URL for the user's profile picture. Defaults to "picture", which OpenID Connect compliant providers should provide; it has to refer to a direct image file such as a PNG, JPEG, or GIF file. - You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`. + Currently only supported in monolithic (single-process) server configurations where the media repository runs within the Synapse process. -It is possible to configure Synapse to only allow logins if certain attributes -match particular values in the OIDC userinfo. The requirements can be listed under -`attribute_requirements` as shown here: -```yaml -attribute_requirements: - - attribute: family_name - value: "Stephensson" - - attribute: groups - value: "admin" -``` -All of the listed attributes must match for the login to be permitted. Additional attributes can be added to -userinfo by expanding the `scopes` section of the OIDC config to retrieve -additional information from the OIDC provider. + *Deprecated in Synapse v1.75.0.* + + * `localpart_template`: Jinja2 template for the localpart of the MXID. If this is not set, the user will be prompted to choose their own username (see the documentation for the `sso_auth_account_details.html` template). This template can use the `localpart_from_email` filter. + + * `confirm_localpart`: Whether to prompt the user to validate (or change) the generated localpart (see the documentation for the "sso_auth_account_details.html" template), instead of registering the account right away. + + * `display_name_template`: Jinja2 template for the display name to set on first login. If unset, no displayname will be set. + + * `email_template`: Jinja2 template for the email address of the user. If unset, no email address will be added to the account. + + * `extra_attributes`: A map of Jinja2 templates for extra attributes to send back to the client during login. Note that these are non-standard and clients will ignore them without modifications. -If the OIDC claim is a list, then the attribute must match any value in the list. -Otherwise, it must exactly match the value of the claim.
Using the example -above, the `family_name` claim MUST be "Stephensson", but the `groups` -claim MUST contain "admin". +* `backchannel_logout_enabled` (boolean): Set to `true` to process OIDC Back-Channel Logout notifications. Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`. Defaults to `false`. + +* `backchannel_logout_ignore_sub` (boolean): By default, the OIDC Back-Channel Logout feature checks that the `sub` claim matches the subject claim received during login. This check can be disabled by setting this to `true`. Defaults to `false`. + + You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`. Example configuration: ```yaml oidc_providers: - # Generic example - # - - idp_id: my_idp - idp_name: "My OpenID provider" - idp_icon: "mxc://example.com/mediaid" - discover: false - issuer: "https://accounts.example.com/" - client_id: "provided-by-your-issuer" - client_secret: "provided-by-your-issuer" - client_auth_method: client_secret_post - scopes: ["openid", "profile"] - authorization_endpoint: "https://accounts.example.com/oauth2/auth" - token_endpoint: "https://accounts.example.com/oauth2/token" - userinfo_endpoint: "https://accounts.example.com/userinfo" - jwks_uri: "https://accounts.example.com/.well-known/jwks.json" - additional_authorization_parameters: - acr_values: 2fa - skip_verification: true - enable_registration: true - user_mapping_provider: - config: - subject_claim: "id" - localpart_template: "{{ user.login }}" - display_name_template: "{{ user.name }}" - email_template: "{{ user.email }}" - attribute_requirements: - - attribute: userGroup - value: "synapseUsers" -``` ---- -### `cas_config` - -Enable Central Authentication Service (CAS) for registration and login. -Has the following sub-options: -* `enabled`: Set this to true to enable authorization against a CAS server. - Defaults to false. -* `idp_name`: A user-facing name for this identity provider, which is used to - offer the user a choice of login mechanisms. -* `idp_icon`: An optional icon for this identity provider, which is presented - by clients and Synapse's own IdP picker page. If given, must be an - MXC URI of the format `mxc://<server-name>/<media-id>`. (An easy way to - obtain such an MXC URI is to upload an image to an (unencrypted) room - and then copy the "url" from the source of the event.) -* `idp_brand`: An optional brand for this identity provider, allowing clients - to style the login flow according to the identity provider in question. - See the [spec](https://spec.matrix.org/latest/) for possible options here. -* `server_url`: The URL of the CAS authorization endpoint. -* `protocol_version`: The CAS protocol version, defaults to none (version 3 is required if you want to use "required_attributes"). -* `displayname_attribute`: The attribute of the CAS response to use as the display name. - If no name is given here, no displayname will be set. -* `required_attributes`: It is possible to configure Synapse to only allow logins if CAS attributes - match particular values. All of the keys given below must exist - and the values must match the given value. Alternately if the given value - is `None` then any value is allowed (the attribute just must exist). - All of the listed attributes must match for the login to be permitted. -* `enable_registration`: set to 'false' to disable automatic registration of new - users. 
This allows the CAS SSO flow to be limited to sign in only, rather than - automatically registering users that have a valid SSO login but do not have - a pre-registered account. Defaults to true. -* `allow_numeric_ids`: set to 'true' allow numeric user IDs (default false). - This allows CAS SSO flow to provide user IDs composed of numbers only. - These identifiers will be prefixed by the letter "u" by default. - The prefix can be configured using the "numeric_ids_prefix" option. - Be careful to choose the prefix correctly to avoid any possible conflicts - (e.g. user 1234 becomes u1234 when a user u1234 already exists). -* `numeric_ids_prefix`: the prefix you wish to add in front of a numeric user ID - when the "allow_numeric_ids" option is set to "true". - By default, the prefix is the letter "u" and only alphanumeric characters are allowed. - - *Added in Synapse 1.93.0.* - -Example configuration: -```yaml -cas_config: - enabled: true - server_url: "https://cas-server.com" - protocol_version: 3 - displayname_attribute: name - required_attributes: - userGroup: "staff" - department: None +- idp_id: my_idp + idp_name: My OpenID provider + idp_icon: mxc://example.com/mediaid + discover: false + issuer: https://accounts.example.com/ + client_id: provided-by-your-issuer + client_secret: provided-by-your-issuer + client_auth_method: client_secret_post + scopes: + - openid + - profile + authorization_endpoint: https://accounts.example.com/oauth2/auth + token_endpoint: https://accounts.example.com/oauth2/token + userinfo_endpoint: https://accounts.example.com/userinfo + jwks_uri: https://accounts.example.com/.well-known/jwks.json + additional_authorization_parameters: + acr_values: 2fa + passthrough_authorization_parameters: + - login_hint + skip_verification: true enable_registration: true + user_mapping_provider: + config: + subject_claim: id + localpart_template: '{{ user.login }}' + display_name_template: '{{ user.name }}' + email_template: '{{ user.email }}' + attribute_requirements: + - attribute: userGroup + value: synapseUsers ``` --- ### `sso` -Additional settings to use with single-sign on systems such as OpenID Connect, -SAML2 and CAS. +*(object)* Additional settings to use with single sign-on systems such as OpenID Connect. -Server admins can configure custom templates for pages related to SSO. See -[here](../../templates.md) for more information. +Server admins can configure custom templates for pages related to SSO. See [here](../../templates.md) for more information. -Options include: -* `client_whitelist`: A list of client URLs which are whitelisted so that the user does not - have to confirm giving access to their account to the URL. Any client - whose URL starts with an entry in the following list will not be subject - to an additional confirmation step after the SSO login is completed. - WARNING: An entry such as "https://my.client" is insecure, because it - will also match "https://my.client.evil.site", exposing your users to - phishing attacks from evil.site. To avoid this, include a slash after the - hostname: "https://my.client/". - The login fallback page (used by clients that don't natively support the - required login flows) is whitelisted in addition to any URLs in this list. - By default, this list contains only the login fallback page. -* `update_profile_information`: Use this setting to keep a user's profile fields in sync with information from - the identity provider.
Currently only syncing the displayname is supported. Fields - are checked on every SSO login, and are updated if necessary. - Note that enabling this option will override user profile information, - regardless of whether users have opted-out of syncing that - information when first signing in. Defaults to false. +This setting has the following sub-options: + +* `client_whitelist` (array|null): A list of client URLs which are whitelisted so that the user does not have to confirm giving access to their account to the URL. Any client whose URL starts with an entry in the following list will not be subject to an additional confirmation step after the SSO login is completed. + + WARNING: An entry such as "https://my.client" is insecure, because it will also match "https://my.client.evil.site", exposing your users to phishing attacks from evil.site. To avoid this, include a slash after the hostname: "https://my.client/". + The login fallback page (used by clients that don't natively support the required login flows) is whitelisted in addition to any URLs in this list. By default, this list contains only the login fallback page. + + Defaults to `null`. + +* `update_profile_information` (boolean): Use this setting to keep a user's profile fields in sync with information from the identity provider. Currently only syncing the displayname is supported. Fields are checked on every SSO login, and are updated if necessary. Note that enabling this option will override user profile information, regardless of whether users have opted out of syncing that information when first signing in. Defaults to `false`. Example configuration: ```yaml sso: - client_whitelist: - - https://riot.im/develop - - https://my.custom.client/ - update_profile_information: true + client_whitelist: + - https://riot.im/develop + - https://my.custom.client/ + update_profile_information: true ``` --- ### `jwt_config` -JSON web token integration. The following settings can be used to make -Synapse JSON web tokens for authentication, instead of its internal -password database. +*(object)* JSON web token integration. The following settings can be used to make Synapse use JSON web tokens for authentication, instead of its internal password database. -Each JSON Web Token needs to contain a "sub" (subject) claim, which is -used as the localpart of the mxid. +Each JSON Web Token needs to contain a "sub" (subject) claim, which is used as the localpart of the mxid. -Additionally, the expiration time ("exp"), not before time ("nbf"), -and issued at ("iat") claims are validated if present. +Additionally, the expiration time ("exp"), not before time ("nbf"), and issued at ("iat") claims are validated if present. -Note that this is a non-standard login type and client support is -expected to be non-existent. +Note that this is a non-standard login type and client support is expected to be non-existent. See [here](../../jwt.md) for more. -Additional sub-options for this setting include: -* `enabled`: Set to true to enable authorization using JSON web - tokens. Defaults to false. -* `secret`: This is either the private shared secret or the public key used to - decode the contents of the JSON web token. Required if `enabled` is set to true. -* `algorithm`: The algorithm used to sign (or HMAC) the JSON web token. - Supported algorithms are listed - [here (section JWS)](https://docs.authlib.org/en/latest/specs/rfc7518.html). - Required if `enabled` is set to true. -* `subject_claim`: Name of the claim containing a unique identifier for the user.
- Optional, defaults to `sub`. -* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the - "iss" claim will be required and validated for all JSON web tokens. -* `audiences`: A list of audiences to validate the "aud" claim against. Optional. - If provided the "aud" claim will be required and validated for all JSON web tokens. - Note that if the "aud" claim is included in a JSON web token then - validation will fail without configuring audiences. +This setting has the following sub-options: + +* `enabled` (boolean): Set to true to enable authorization using JSON web tokens. Defaults to `false`. + +* `secret` (string): This is either the private shared secret or the public key used to decode the contents of the JSON web token. Required if `enabled` is set to true. + +* `algorithm` (string): The algorithm used to sign (or HMAC) the JSON web token. Supported algorithms are listed [here (section JWS)](https://docs.authlib.org/en/latest/specs/rfc7518.html). Required if `enabled` is set to true. + +* `subject_claim` (string|null): Name of the claim containing a unique identifier for the user. Defaults to `"sub"`. + +* `display_name_claim` (string|null): Name of the claim containing the display name for the user. If provided, the display name will be set to the value of this claim upon first login. Defaults to `null`. + +* `issuer` (string|null): The issuer to validate the "iss" claim against. If provided the "iss" claim will be required and validated for all JSON web tokens. Defaults to `null`. + +* `audiences` (array|null): A list of audiences to validate the "aud" claim against. If provided the "aud" claim will be required and validated for all JSON web tokens. Note that if the "aud" claim is included in a JSON web token then validation will fail without configuring audiences. Defaults to `null`. Example configuration: ```yaml jwt_config: - enabled: true - secret: "provided-by-your-issuer" - algorithm: "provided-by-your-issuer" - subject_claim: "name_of_claim" - issuer: "provided-by-your-issuer" - audiences: - - "provided-by-your-issuer" + enabled: true + secret: provided-by-your-issuer + algorithm: provided-by-your-issuer + subject_claim: name_of_claim + display_name_claim: name_of_claim + issuer: provided-by-your-issuer + audiences: + - provided-by-your-issuer ``` --- ### `password_config` -Use this setting to enable password-based logins. +*(object)* Use this setting to enable password-based logins. This setting has the following sub-options: -* `enabled`: Defaults to true. - Set to false to disable password authentication. - Set to `only_for_reauth` to allow users with existing passwords to use them - to reauthenticate (not log in), whilst preventing new users from setting passwords. -* `localdb_enabled`: Set to false to disable authentication against the local password - database. This is ignored if `enabled` is false, and is only useful - if you have other `password_providers`. Defaults to true. -* `pepper`: Set the value here to a secret random string for extra security. - DO NOT CHANGE THIS AFTER INITIAL SETUP! -* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. - Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows: - * `enabled`: Defaults to false. Set to true to enable. - * `minimum_length`: Minimum accepted length for a password. Defaults to 0. - * `require_digit`: Whether a password must contain at least one digit. - Defaults to false. 
- * `require_symbol`: Whether a password must contain at least one symbol. - A symbol is any character that's not a number or a letter. Defaults to false. - * `require_lowercase`: Whether a password must contain at least one lowercase letter. - Defaults to false. - * `require_uppercase`: Whether a password must contain at least one uppercase letter. - Defaults to false. +* `enabled` (boolean|string): Set to false to disable password authentication. Set to `only_for_reauth` to allow users with existing passwords to use them to reauthenticate (not log in), whilst preventing new users from setting passwords. Defaults to `true`. + +* `localdb_enabled` (boolean): Set to false to disable authentication against the local password database. This is ignored if `enabled` is false, and is only useful if you have other `password_providers`. Defaults to `true`. + +* `pepper` (string|null): Set the value here to a secret random string for extra security. DO NOT CHANGE THIS AFTER INITIAL SETUP! Defaults to `null`. + +* `policy` (object): Define and enforce a password policy, such as minimum lengths for passwords, etc. This is an implementation of MSC2000. + + This setting has the following sub-options: + + * `enabled` (boolean): Set to true to enable. Defaults to `false`. + + * `minimum_length` (integer): Minimum accepted length for a password. Defaults to `0`. + + * `require_digit` (boolean): Whether a password must contain at least one digit. Defaults to `false`. + + * `require_symbol` (boolean): Whether a password must contain at least one symbol. A symbol is any character that's not a number or a letter. Defaults to `false`. + + * `require_lowercase` (boolean): Whether a password must contain at least one lowercase letter. Defaults to `false`. + + * `require_uppercase` (boolean): Whether a password must contain at least one uppercase letter. Defaults to `false`. Example configuration: ```yaml password_config: - enabled: false - localdb_enabled: false - pepper: "EVEN_MORE_SECRET" - - policy: - enabled: true - minimum_length: 15 - require_digit: true - require_symbol: true - require_lowercase: true - require_uppercase: true + enabled: false + localdb_enabled: false + pepper: EVEN_MORE_SECRET + policy: + enabled: true + minimum_length: 15 + require_digit: true + require_symbol: true + require_lowercase: true + require_uppercase: true ``` --- ## Push -Configuration settings related to push notifications + +Configuration settings related to push notifications. --- ### `push` -This setting defines options for push notifications. - -This option has a number of sub-options. They are as follows: -* `enabled`: Enables or disables push notification calculation. Note, disabling this will also - stop unread counts being calculated for rooms. This mode of operation is intended - for homeservers which may only have bots or appservice users connected, or are otherwise - not interested in push/unread counters. This is enabled by default. -* `include_content`: Clients requesting push notifications can either have the body of - the message sent in the notification poke along with other details - like the sender, or just the event ID and room ID (`event_id_only`). - If clients choose the to have the body sent, this option controls whether the - notification request includes the content of the event (other details - like the sender are still included). If `event_id_only` is enabled, it - has no effect. - For modern android devices the notification content will still appear - because it is loaded by the app. 
iPhone, however will send a - notification saying only that a message arrived and who it came from. - Defaults to true. Set to false to only include the event ID and room ID in push notification payloads. -* `group_unread_count_by_room: false`: When a push notification is received, an unread count is also sent. - This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the - user has unread messages in. Defaults to true, meaning push clients will see the number of - rooms with unread messages in them. Set to false to instead send the number - of unread messages. -* `jitter_delay`: Delays push notifications by a random amount up to the given - duration. Useful for mitigating timing attacks. Optional, defaults to no - delay. _Added in Synapse 1.84.0._ +*(object)* This setting defines options for push notifications. + +This setting has the following sub-options: + +* `enabled` (boolean): Enables or disables push notification calculation. Note, disabling this will also stop unread counts being calculated for rooms. This mode of operation is intended for homeservers which may only have bots or appservice users connected, or are otherwise not interested in push/unread counters. Defaults to `true`. + +* `include_content` (boolean): Clients requesting push notifications can either have the body of the message sent in the notification poke along with other details like the sender, or just the event ID and room ID (`event_id_only`). If clients choose to have the body sent, this option controls whether the notification request includes the content of the event (other details like the sender are still included). If `event_id_only` is enabled, it has no effect. For modern Android devices the notification content will still appear because it is loaded by the app. iPhones, however, will send a notification saying only that a message arrived and who it came from. Set to false to only include the event ID and room ID in push notification payloads. Defaults to `true`. + +* `group_unread_count_by_room` (boolean): When a push notification is received, an unread count is also sent. This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the user has unread messages in. If true, push clients will see the number of rooms with unread messages in them. Set to false to instead send the number of unread messages. Defaults to `true`. + +* `jitter_delay` (duration): Delays push notifications by a random amount up to the given duration. Useful for mitigating timing attacks. Optional. + + _Added in Synapse 1.84.0._ + + Defaults to `"0s"`. Example configuration: ```yaml @@ -3794,29 +3411,27 @@ push: enabled: true include_content: false group_unread_count_by_room: false - jitter_delay: "10s" + jitter_delay: 10s ``` --- ## Rooms + Config options relating to rooms. --- ### `encryption_enabled_by_default_for_room_type` -Controls whether locally-created rooms should be end-to-end encrypted by -default. +*(string)* Controls whether locally-created rooms should be end-to-end encrypted by default. Possible options are "all", "invite", and "off". They are defined as: * "all": any locally-created room -* "invite": any room created with the `private_chat` or `trusted_private_chat` - room creation presets +* "invite": any room created with the `private_chat` or `trusted_private_chat` room creation presets * "off": this option has no effect -The default value is "off".
+Note that this option will only affect rooms created after it is set. It will also not affect rooms created by other servers. -Note that this option will only affect rooms created after it is set. It -will also not affect rooms created by other servers. +Defaults to `"off"`. Example configuration: ```yaml @@ -3825,72 +3440,63 @@ encryption_enabled_by_default_for_room_type: invite --- ### `user_directory` -This setting defines options related to the user directory. +*(object)* This setting defines options related to the user directory. + +This setting has the following sub-options: + +* `enabled` (boolean): Defines whether users can search the user directory. If false then empty responses are returned to all queries. Defaults to `true`. + +* `search_all_users` (boolean): Defines whether to search all users visible to your homeserver at the time the search is performed. If set to true, will return all users known to the homeserver matching the search query. If false, search results will only contain users visible in public rooms and users sharing a room with the requester. + + NB. If you set this to true, and the last time the user_directory search indexes were (re)built was before Synapse 1.44, you'll have to rebuild the indexes in order to search through all known users. + + These indexes are built the first time Synapse starts; admins can manually trigger a rebuild via the API following the instructions [for running background updates](../administration/admin_api/background_updates.md#run). + + Defaults to `false`. -This option has the following sub-options: -* `enabled`: Defines whether users can search the user directory. If false then - empty responses are returned to all queries. Defaults to true. -* `search_all_users`: Defines whether to search all users visible to your homeserver at the time the search is performed. - If set to true, will return all users known to the homeserver matching the search query. - If false, search results will only contain users - visible in public rooms and users sharing a room with the requester. - Defaults to false. +* `prefer_local_users` (boolean): Defines whether to prefer local users in search query results. If set to true, local users are more likely to appear above remote users when searching the user directory. Defaults to `false`. - NB. If you set this to true, and the last time the user_directory search - indexes were (re)built was before Synapse 1.44, you'll have to - rebuild the indexes in order to search through all known users. +* `exclude_remote_users` (boolean): If set to true, the search will only return local users. Defaults to `false`. - These indexes are built the first time Synapse starts; admins can - manually trigger a rebuild via the API following the instructions - [for running background updates](../administration/admin_api/background_updates.md#run), - set to true to return search results containing all known users, even if that - user does not share a room with the requester. -* `prefer_local_users`: Defines whether to prefer local users in search query results. - If set to true, local users are more likely to appear above remote users when searching the - user directory. Defaults to false. -* `show_locked_users`: Defines whether to show locked users in search query results. Defaults to false. +* `show_locked_users` (boolean): Defines whether to show locked users in search query results.
Defaults to `false`. Example configuration: ```yaml user_directory: - enabled: false - search_all_users: true - prefer_local_users: true - show_locked_users: true + enabled: false + search_all_users: true + prefer_local_users: true + exclude_remote_users: false + show_locked_users: true ``` --- ### `user_consent` -For detailed instructions on user consent configuration, see [here](../../consent_tracking.md). +*(object)* For detailed instructions on user consent configuration, see [here](../../consent_tracking.md). + +Parts of this section are required if enabling the `consent` resource under [`listeners`](#listeners), in particular `template_dir` and `version`. + +This setting has the following sub-options: + +* `template_dir` (string): Gives the location of the templates for the HTML forms. This directory should contain one subdirectory per language (e.g. `en`, `fr`), and each language directory should contain the policy document (named as <version>.html) and a success page (success.html). + +* `version` (number): Specifies the "current" version of the policy document. It defines the version to be served by the consent resource if there is no `v` parameter. -Parts of this section are required if enabling the `consent` resource under -[`listeners`](#listeners), in particular `template_dir` and `version`. +* `server_notice_content` (object): If enabled, will send a user a "Server Notice" asking them to consent to the privacy policy. The [`server_notices` section](#server_notices) must also be configured for this to work. Notices will *not* be sent to guest users unless `send_server_notice_to_guests` is set to true. -* `template_dir`: gives the location of the templates for the HTML forms. - This directory should contain one subdirectory per language (eg, `en`, `fr`), - and each language directory should contain the policy document (named as - <version>.html) and a success page (success.html). + This setting has the following sub-options: -* `version`: specifies the 'current' version of the policy document. It defines - the version to be served by the consent resource if there is no 'v' - parameter. + * `msgtype` (string): Message type of the notice event. -* `server_notice_content`: if enabled, will send a user a "Server Notice" - asking them to consent to the privacy policy. The [`server_notices` section](#server_notices) - must also be configured for this to work. Notices will *not* be sent to - guest users unless `send_server_notice_to_guests` is set to true. + * `body` (string): Message template for the server notice event body. -* `block_events_error`, if set, will block any attempts to send events - until the user consents to the privacy policy. The value of the setting is - used as the text of the error. +* `send_server_notice_to_guests` (boolean): Send server notices to guest users, too. Defaults to `false`. -* `require_at_registration`, if enabled, will add a step to the registration - process, similar to how captcha works. Users will be required to accept the - policy before their account is created. +* `block_events_error` (string|null): If set, will block any attempts to send events until the user consents to the privacy policy. The value of the setting is used as the text of the error. Defaults to `null`. -* `policy_name` is the display name of the policy users will see when registering - for an account. Has no effect unless `require_at_registration` is enabled. - Defaults to "Privacy Policy".
+* `require_at_registration` (boolean): If enabled, will add a step to the registration process, similar to how captcha works. Users will be required to accept the policy before their account is created. + +* `policy_name` (string): Human-readable name of the privacy policy. Defaults to `"Privacy Policy"`. Example configuration: ```yaml @@ -3899,25 +3505,22 @@ user_consent: version: 1.0 server_notice_content: msgtype: m.text - body: >- - To continue using this homeserver you must review and agree to the - terms and conditions at %(consent_uri)s + body: To continue using this homeserver you must review and agree to the terms + and conditions at %(consent_uri)s send_server_notice_to_guests: true - block_events_error: >- - To continue using this homeserver you must review and agree to the - terms and conditions at %(consent_uri)s + block_events_error: To continue using this homeserver you must review and agree + to the terms and conditions at %(consent_uri)s require_at_registration: false policy_name: Privacy Policy ``` --- ### `stats` -Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md) -for more. +*(object)* Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md) for more. + +This setting has the following sub-options: -* `enabled`: Set to false to disable room and user statistics. Note that doing - so may cause certain features (such as the room directory) not to work - correctly. Defaults to true. +* `enabled` (boolean): Set to false to disable room and user statistics. Note that doing so may cause certain features (such as the room directory) not to work correctly. Defaults to `true`. Example configuration: ```yaml @@ -3927,42 +3530,53 @@ stats: --- ### `server_notices` -Use this setting to enable a room which can be used to send notices -from the server to users. It is a special room which users cannot leave; notices -in the room come from a special "notices" user id. +*(object)* Use this setting to enable a room which can be used to send notices from the server to users. It is a special room which users cannot leave; notices in the room come from a special "notices" user id. -If you use this setting, you *must* define the `system_mxid_localpart` -sub-setting, which defines the id of the user which will be used to send the -notices. - -Sub-options for this setting include: -* `system_mxid_display_name`: set the display name of the "notices" user -* `system_mxid_avatar_url`: set the avatar for the "notices" user -* `room_name`: set the room name of the server notices room -* `room_avatar_url`: optional string. The room avatar to use for server notice rooms. If set to the empty string `""`, notice rooms will not be given an avatar. Defaults to the empty string. _Added in Synapse 1.99.0._ -* `room_topic`: optional string. The topic to use for server notice rooms. If set to the empty string `""`, notice rooms will not be given a topic. Defaults to the empty string. _Added in Synapse 1.99.0._ -* `auto_join`: boolean. If true, the user will be automatically joined to the room instead of being invited. - Defaults to false. _Added in Synapse 1.98.0._ +If you use this setting, you *must* define the `system_mxid_localpart` sub-setting, which defines the id of the user which will be used to send the notices. Note that the name, topic and avatar of existing server notice rooms will only be updated when a new notice event is sent. 
+This setting has the following sub-options: + +* `system_mxid_display_name` (string): Display name of the "notices" user. Defaults to `"Notices"`. + +* `system_mxid_avatar_url` (string|null): Avatar for the "notices" user. Defaults to `null`. + +* `room_name` (string): Room name of the server notices room. Defaults to `"Server Notices"`. + +* `room_avatar_url` (string|null): Room avatar to use for server notice rooms. If set to the empty string `""`, notice rooms will not be given an avatar. + + _Added in Synapse 1.99.0._ + + Defaults to `null`. + +* `room_topic` (string|null): Topic to use for server notice rooms. If set to the empty string `""`, notice rooms will not be given a topic. + + _Added in Synapse 1.99.0._ + + Defaults to `null`. + +* `auto_join` (boolean): If true, the user will be automatically joined to the room instead of being invited. + + _Added in Synapse 1.98.0._ + + Defaults to `false`. + Example configuration: ```yaml server_notices: system_mxid_localpart: notices - system_mxid_display_name: "Server Notices" - system_mxid_avatar_url: "mxc://example.com/oumMVlgDnLYFaPVkExemNVVZ" - room_name: "Server Notices" - room_avatar_url: "mxc://example.com/oumMVlgDnLYFaPVkExemNVVZ" - room_topic: "Room used by your server admin to notice you of important information" + system_mxid_display_name: Server Notices + system_mxid_avatar_url: mxc://example.com/oumMVlgDnLYFaPVkExemNVVZ + room_name: Server Notices + room_avatar_url: mxc://example.com/oumMVlgDnLYFaPVkExemNVVZ + room_topic: Room used by your server admin to notify you of important information auto_join: true ``` --- ### `enable_room_list_search` -Set to false to disable searching the public room list. When disabled -blocks searching local and remote room lists for local and remote -users by always returning an empty list for all queries. Defaults to true. +*(boolean)* Set to false to disable searching the public room list. When disabled, searching of the local and remote room lists is blocked for local and remote users; an empty list is always returned for all queries. Defaults to `true`. Example configuration: ```yaml @@ -3971,191 +3585,142 @@ enable_room_list_search: false --- ### `alias_creation_rules` -The `alias_creation_rules` option allows server admins to prevent unwanted -alias creation on this server. +*(array|null)* The `alias_creation_rules` option allows server admins to prevent unwanted alias creation on this server. -This setting is an optional list of 0 or more rules. By default, no list is -provided, meaning that all alias creations are permitted. +This setting is an optional list of 0 or more rules. By default, no list is provided, meaning that all alias creations are permitted. -Otherwise, requests to create aliases are matched against each rule in order. -The first rule that matches decides if the request is allowed or denied. If no -rule matches, the request is denied. In particular, this means that configuring -an empty list of rules will deny every alias creation request. +Otherwise, requests to create aliases are matched against each rule in order. The first rule that matches decides if the request is allowed or denied. If no rule matches, the request is denied. In particular, this means that configuring an empty list of rules will deny every alias creation request. -Each rule is a YAML object containing four fields, each of which is an optional string: +Each of the glob patterns is optional, defaulting to `*` ("match anything").
Note that the patterns match against fully qualified IDs, e.g. against `@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead of `alice`, `room` and `abcdefghijk`. -* `user_id`: a glob pattern that matches against the creator of the alias. -* `alias`: a glob pattern that matches against the alias being created. -* `room_id`: a glob pattern that matches against the room ID the alias is being pointed at. -* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. +Each rule is a YAML object containing four fields, each of which is an optional string. -Each of the glob patterns is optional, defaulting to `*` ("match anything"). -Note that the patterns match against fully qualified IDs, e.g. against -`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead -of `alice`, `room` and `abcedgghijk`. +Defaults to `null`. -Example configuration: +Options for each entry include: + +* `user_id` (string|null): Glob pattern that matches against the creator of the alias. + +* `alias` (string|null): Glob pattern that matches against the alias being created. + +* `room_id` (string|null): Glob pattern that matches against the room ID the alias is being pointed at. + +* `action` (string): Either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. + +Example configurations: ```yaml -# No rule list specified. All alias creations are allowed. -# This is the default behaviour. -alias_creation_rules: +alias_creation_rules: null ``` ```yaml -# A list of one rule which allows everything. -# This has the same effect as the previous example. alias_creation_rules: - - "action": "allow" +- action: allow ``` ```yaml -# An empty list of rules. All alias creations are denied. alias_creation_rules: [] ``` ```yaml -# A list of one rule which denies everything. -# This has the same effect as the previous example. alias_creation_rules: - - "action": "deny" +- action: deny ``` ```yaml -# Prevent a specific user from creating aliases. -# Allow other users to create any alias alias_creation_rules: - - user_id: "@bad_user:example.com" - action: deny - - - action: allow +- user_id: '@bad_user:example.com' + action: deny +- action: allow ``` ```yaml -# Prevent aliases being created which point to a specific room. alias_creation_rules: - - room_id: "!forbiddenRoom:example.com" - action: deny - - - action: allow +- room_id: '!forbiddenRoom:example.com' + action: deny +- action: allow ``` - --- ### `room_list_publication_rules` -The `room_list_publication_rules` option allows server admins to prevent -unwanted entries from being published in the public room list. +*(array|null)* The `room_list_publication_rules` option allows server admins to prevent unwanted entries from being published in the public room list. + +The format of this option is the same as that for [`alias_creation_rules`](#alias_creation_rules): an optional list of 0 or more rules. By default, no list is provided, meaning that no one may publish to the room list (except server admins). + +Otherwise, requests to publish a room are matched against each rule in order. The first rule that matches decides if the request is allowed or denied. If no rule matches, the request is denied. In particular, this means that configuring an empty list of rules will deny every room publication request.
+ +Requests to create a public (public as in published to the room directory) room which violate the configured rules will result in the room being created but not published to the room directory. -The format of this option is the same as that for -[`alias_creation_rules`](#alias_creation_rules): an optional list of 0 or more -rules. By default, no list is provided, meaning that all rooms may be -published to the room list. +Each of the glob patterns is optional, defaulting to `*` ("match anything"). Note that the patterns match against fully qualified IDs, e.g. against `@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead of `alice`, `room` and `abcdefghijk`. -Otherwise, requests to publish a room are matched against each rule in order. -The first rule that matches decides if the request is allowed or denied. If no -rule matches, the request is denied. In particular, this means that configuring -an empty list of rules will deny every alias creation request. +Each rule is a YAML object containing four fields, each of which is an optional string. -Requests to create a public (public as in published to the room directory) room which violates -the configured rules will result in the room being created but not published to the room directory. +_Changed in Synapse 1.126.0: the default was changed to deny publishing to the room list._ -Each rule is a YAML object containing four fields, each of which is an optional string: +Defaults to `null`. -* `user_id`: a glob pattern that matches against the user publishing the room. -* `alias`: a glob pattern that matches against one of published room's aliases. +Options for each entry include: + +* `user_id` (string|null): Glob pattern that matches against the user publishing the room. + +* `alias` (string|null): Glob pattern that matches against one of the published room's aliases. - If the room has no aliases, the alias match fails unless `alias` is unspecified or `*`. - If the room has exactly one alias, the alias match succeeds if the `alias` pattern matches that alias. - If the room has two or more aliases, the alias match succeeds if the pattern matches at least one of the aliases. -* `room_id`: a glob pattern that matches against the room ID of the room being published. -* `action`: either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. -Each of the glob patterns is optional, defaulting to `*` ("match anything"). -Note that the patterns match against fully qualified IDs, e.g. against -`@alice:example.com`, `#room:example.com` and `!abcdefghijk:example.com` instead -of `alice`, `room` and `abcedgghijk`. +* `room_id` (string|null): Glob pattern that matches against the room ID of the room being published. + +* `action` (string): Either `allow` or `deny`. What to do with the request if the rule matches. Defaults to `allow`. -Example configuration: - +Example configurations: ```yaml -# No rule list specified. Anyone may publish any room to the public list. -# This is the default behaviour. -room_list_publication_rules: +room_list_publication_rules: null ``` ```yaml -# A list of one rule which allows everything. -# This has the same effect as the previous example. room_list_publication_rules: - - "action": "allow" +- action: deny ``` ```yaml -# An empty list of rules. No-one may publish to the room list. room_list_publication_rules: [] ``` ```yaml -# A list of one rule which denies everything. -# This has the same effect as the previous example.
room_list_publication_rules: - - "action": "deny" +- action: allow ``` ```yaml -# Prevent a specific user from publishing rooms. -# Allow other users to publish anything. room_list_publication_rules: - - user_id: "@bad_user:example.com" - action: deny - - - action: allow +- user_id: '@bad_user:example.com' + action: deny +- action: allow ``` ```yaml -# Prevent publication of a specific room. room_list_publication_rules: - - room_id: "!forbiddenRoom:example.com" - action: deny - - - action: allow +- room_id: '!forbiddenRoom:example.com' + action: deny +- action: allow ``` ```yaml -# Prevent publication of rooms with at least one alias containing the word "potato". room_list_publication_rules: - - alias: "#*potato*:example.com" - action: deny - - - action: allow +- alias: '#*potato*:example.com' + action: deny +- action: allow ``` - --- ### `default_power_level_content_override` -The `default_power_level_content_override` option controls the default power -levels for rooms. +*(object)* The `default_power_level_content_override` option controls the default power levels for rooms. -Useful if you know that your users need special permissions in rooms -that they create (e.g. to send particular types of state events without -needing an elevated power level). This takes the same shape as the -`power_level_content_override` parameter in the /createRoom API, but -is applied before that parameter. +Useful if you know that your users need special permissions in rooms that they create (e.g. to send particular types of state events without needing an elevated power level). This takes the same shape as the `power_level_content_override` parameter in the /createRoom API, but is applied before that parameter. -Note that each key provided inside a preset (for example `events` in the example -below) will overwrite all existing defaults inside that key. So in the example -below, newly-created private_chat rooms will have no rules for any event types -except `com.example.foo`. - -Example configuration: -```yaml -default_power_level_content_override: - private_chat: { "events": { "com.example.foo" : 0 } } - trusted_private_chat: null - public_chat: null -``` +Note that each key provided inside a preset (for example `events` in the example below) will overwrite all existing defaults inside that key. So in Example #1, newly-created private_chat rooms will have no rules for any event types except `com.example.foo`. The default power levels for each preset are: + ```yaml "m.room.name": 50 "m.room.power_levels": 100 @@ -4167,130 +3732,140 @@ The default power levels for each preset are: "m.room.encryption": 100 ``` -So a complete example where the default power-levels for a preset are maintained -but the power level for a new key is set is: +In Example #2 the default power-levels for a preset are maintained, but the power level for a new key is set. + +Defaults to `{}`. 
+ +Example configurations: ```yaml default_power_level_content_override: - private_chat: + private_chat: events: - "com.example.foo": 0 - "m.room.name": 50 - "m.room.power_levels": 100 - "m.room.history_visibility": 100 - "m.room.canonical_alias": 50 - "m.room.avatar": 50 - "m.room.tombstone": 100 - "m.room.server_acl": 100 - "m.room.encryption": 100 - trusted_private_chat: null - public_chat: null + com.example.foo: 0 + trusted_private_chat: null + public_chat: null ``` +```yaml +default_power_level_content_override: + private_chat: + events: + com.example.foo: 0 + m.room.name: 50 + m.room.power_levels: 100 + m.room.history_visibility: 100 + m.room.canonical_alias: 50 + m.room.avatar: 50 + m.room.tombstone: 100 + m.room.server_acl: 100 + m.room.encryption: 100 + trusted_private_chat: null + public_chat: null +``` --- ### `forget_rooms_on_leave` -Set to true to automatically forget rooms for users when they leave them, either -normally or via a kick or ban. Defaults to false. +*(boolean)* Set to true to automatically forget rooms for users when they leave them, either normally or via a kick or ban. Defaults to `false`. Example configuration: ```yaml -forget_rooms_on_leave: false +forget_rooms_on_leave: true ``` --- ### `exclude_rooms_from_sync` -A list of rooms to exclude from sync responses. This is useful for server -administrators wishing to group users into a room without these users being able -to see it from their client. -By default, no room is excluded. +*(array)* A list of rooms to exclude from sync responses. This is useful for server administrators wishing to group users into a room without these users being able to see it from their client. Defaults to `[]`. Example configuration: ```yaml exclude_rooms_from_sync: - - "!foo:example.com" +- '!foo:example.com' ``` - --- ## Opentracing + Configuration options related to Opentracing support. --- ### `opentracing` -These settings enable and configure opentracing, which implements distributed tracing. -This allows you to observe the causal chains of events across servers -including requests, key lookups etc., across any server running -synapse or any other services which support opentracing -(specifically those implemented with Jaeger). +*(object)* These settings enable and configure opentracing, which implements distributed tracing. This allows you to observe the causal chains of events across servers including requests, key lookups etc., across any server running synapse or any other services which support opentracing (specifically those implemented with Jaeger). + +This setting has the following sub-options: + +* `enabled` (boolean): Whether tracing is enabled. Set to true to enable. Defaults to `false`. + +* `homeserver_whitelist` (array): The list of homeservers we wish to send and receive span contexts and span baggage. See [here](../../opentracing.md) for more. This is a list of regexes which are matched against the `server_name` of the homeserver. If the list is empty, no servers are matched. Defaults to `[]`. -Sub-options include: -* `enabled`: whether tracing is enabled. Set to true to enable. Disabled by default. -* `homeserver_whitelist`: The list of homeservers we wish to send and receive span contexts and span baggage. - See [here](../../opentracing.md) for more. - This is a list of regexes which are matched against the `server_name` of the homeserver. - By default, it is empty, so no servers are matched. 
-* `force_tracing_for_users`: # A list of the matrix IDs of users whose requests will always be traced, - even if the tracing system would otherwise drop the traces due to probabilistic sampling. - By default, the list is empty. -* `jaeger_config`: Jaeger can be configured to sample traces at different rates. - All configuration options provided by Jaeger can be set here. Jaeger's configuration is - mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/). +* `force_tracing_for_users` (array): A list of the matrix IDs of users whose requests will always be traced, even if the tracing system would otherwise drop the traces due to probabilistic sampling. Defaults to `[]`. + +* `jaeger_config` (object): Jaeger can be configured to sample traces at different rates. All configuration options provided by Jaeger can be set here. Jaeger's configuration is mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/). Defaults to `{}`. Example configuration: ```yaml opentracing: - enabled: true - homeserver_whitelist: - - ".*" - force_tracing_for_users: - - "@user1:server_name" - - "@user2:server_name" - - jaeger_config: - sampler: - type: const - param: 1 - logging: - false + enabled: true + homeserver_whitelist: + - .* + force_tracing_for_users: + - '@user1:server_name' + - '@user2:server_name' + jaeger_config: + sampler: + type: const + param: 1 + logging: false ``` --- ## Coordinating workers -Configuration options related to workers which belong in the main config file -(usually called `homeserver.yaml`). -A Synapse deployment can scale horizontally by running multiple Synapse processes -called _workers_. Incoming requests are distributed between workers to handle higher -loads. Some workers are privileged and can accept requests from other workers. + +Configuration options related to workers which belong in the main config file (usually called `homeserver.yaml`). A Synapse deployment can scale horizontally by running multiple Synapse processes called _workers_. Incoming requests are distributed between workers to handle higher loads. Some workers are privileged and can accept requests from other workers. As a result, the worker configuration is divided into two parts. -1. The first part (in this section of the manual) defines which shardable tasks - are delegated to privileged workers. This allows unprivileged workers to make - requests to a privileged worker to act on their behalf. -1. [The second part](#individual-worker-configuration) - controls the behaviour of individual workers in isolation. +1. The first part (in this section of the manual) defines which shardable tasks are delegated to privileged workers. This allows unprivileged workers to make requests to a privileged worker to act on their behalf. +2. [The second part](#individual-worker-configuration) controls the behaviour of individual workers in isolation. For guidance on setting up workers, see the [worker documentation](../../workers.md). --- ### `worker_replication_secret` -A shared secret used by the replication APIs on the main process to authenticate -HTTP requests from workers. +*(string|null)* A shared secret used by the replication APIs on the main process to authenticate HTTP requests from workers. -The default, this value is omitted (equivalently `null`), which means that -traffic between the workers and the main process is not authenticated. 
+If unset or null, traffic between the workers and the main process is not authenticated. + +Replacing an existing `worker_replication_secret` with a new one will break communication with all workers that have not yet updated their secret. + +Defaults to `null`. Example configuration: ```yaml -worker_replication_secret: "secret_secret" +worker_replication_secret: secret_secret +``` +--- +### `worker_replication_secret_path` + +*(string|null)* An alternative to [`worker_replication_secret`](#worker_replication_secret): allows the secret to be specified in an external file. + +The file should be a plain text file, containing only the secret. Synapse reads the secret from the given file once at startup. + +_Added in Synapse 1.126.0._ + +Defaults to `null`. + +Example configuration: +```yaml +worker_replication_secret_path: /path/to/secrets/file ``` --- ### `start_pushers` -Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker). +*(boolean)* Unnecessary to set if using [`pusher_instances`](#pusher_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker). + +Controls sending of push notifications on the main process. Set to `false` if using a [pusher worker](../../workers.md#synapseapppusher). -Controls sending of push notifications on the main process. Set to `false` -if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`. +Defaults to `true`. Example configuration: ```yaml @@ -4299,32 +3874,26 @@ start_pushers: false --- ### `pusher_instances` -It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal) -and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to -a `pusher_instances` map. Doing so will remove handling of this function from the main -process. Multiple workers can be added to this map, in which case the work is balanced -across them. Ensure the main process and all pusher workers are restarted after changing -this option. +*(array)* It is possible to scale the processes that handle sending push notifications to [sygnal](https://github.com/matrix-org/sygnal) and email by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to a `pusher_instances` map. Doing so will remove handling of this function from the main process. Multiple workers can be added to this map, in which case the work is balanced across them. Ensure the main process and all pusher workers are restarted after changing this option. Defaults to `[]`. -Example configuration for a single worker: +Example configurations: ```yaml pusher_instances: - - pusher_worker1 +- pusher_worker1 ``` -And for multiple workers: + ```yaml pusher_instances: - - pusher_worker1 - - pusher_worker2 +- pusher_worker1 +- pusher_worker2 ``` - --- ### `send_federation` -Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker). +*(boolean)* Unnecessary to set if using [`federation_sender_instances`](#federation_sender_instances) with [`generic_workers`](../../workers.md#synapseappgeneric_worker). + +Controls sending of outbound federation transactions on the main process. Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
-Controls sending of outbound federation transactions on the main process. -Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender). Defaults to `true`. Example configuration: @@ -4334,42 +3903,31 @@ send_federation: false --- ### `federation_sender_instances` -It is possible to scale the processes that handle sending outbound federation requests -by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding it's [`worker_name`](#worker_name) to -a `federation_sender_instances` map. Doing so will remove handling of this function from -the main process. Multiple workers can be added to this map, in which case the work is -balanced across them. +*(array)* It is possible to scale the processes that handle sending outbound federation requests by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to a `federation_sender_instances` map. Doing so will remove handling of this function from the main process. Multiple workers can be added to this map, in which case the work is balanced across them. -This configuration setting must be shared between all workers handling federation -sending, and if changed all federation sender workers must be stopped at the same time -and then started, to ensure that all instances are running with the same config (otherwise -events may be dropped). +Load balancing works by assigning each outbound federation request to a federation sender worker based on the hash of the destination server name. This means that all requests being sent to the same destination will be processed by the same worker instance. Multiple `federation_sender_instances` are useful when federating with many servers. -Example configuration for a single worker: +This configuration setting must be shared between all workers handling federation sending, and if changed all federation sender workers must be stopped at the same time and then started, to ensure that all instances are running with the same config (otherwise events may be dropped). + +Defaults to `[]`. + +Example configurations: ```yaml federation_sender_instances: - - federation_sender1 +- federation_sender1 ``` -And for multiple workers: + ```yaml federation_sender_instances: - - federation_sender1 - - federation_sender2 +- federation_sender1 +- federation_sender2 ``` --- ### `instance_map` -When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP -replication listener of the worker, if configured, and to the main process. Each worker -declared under [`stream_writers`](../../workers.md#stream-writers) and -[`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs a HTTP -replication listener, and that listener should be included in the `instance_map`. The -main process also needs an entry on the `instance_map`, and it should be listed under -`main` **if even one other worker exists**. Ensure the port matches with what is -declared inside the `listener` block for a `replication` listener. +*(object)* When using workers this should be a map from [`worker_name`](#worker_name) to the HTTP replication listener of the worker, if configured, and to the main process. Each worker declared under [`stream_writers`](../../workers.md#stream-writers) and [`outbound_federation_restricted_to`](#outbound_federation_restricted_to) needs an HTTP replication listener, and that listener should be included in the `instance_map`.
The main process also needs an entry on the `instance_map`, and it should be listed under `main` **if even one other worker exists**. Ensure the port matches with what is declared inside the `listener` block for a `replication` listener. Defaults to `{}`. - -Example configuration: +Example configurations: ```yaml instance_map: main: @@ -4378,8 +3936,12 @@ instance_map: worker1: host: localhost port: 8034 + other: + host: localhost + port: 8035 + tls: true ``` -Example configuration(#2, for UNIX sockets): + ```yaml instance_map: main: @@ -4390,12 +3952,27 @@ instance_map: --- ### `stream_writers` -Experimental: When using workers you can define which workers should -handle writing to streams such as event persistence and typing notifications. -Any worker specified here must also be in the [`instance_map`](#instance_map). +*(object)* Experimental: When using workers you can define which workers should handle writing to streams such as event persistence and typing notifications. Any worker specified here must also be in the [`instance_map`](#instance_map). + +See the list of available streams in the [worker documentation](../../workers.md#stream-writers). + +Defaults to `{}`. + +This setting has the following sub-options: + +* `events` (string): Name of a worker assigned to the `events` stream. + +* `typing` (string): Name of a worker assigned to the `typing` stream. + +* `to_device` (string): Name of a worker assigned to the `to_device` stream. + +* `account_data` (string): Name of a worker assigned to the `account_data` stream. -See the list of available streams in the -[worker documentation](../../workers.md#stream-writers). +* `receipts` (string): Name of a worker assigned to the `receipts` stream. + +* `presence` (string): Name of a worker assigned to the `presence` stream. + +* `push_rules` (string): Name of a worker assigned to the `push_rules` stream. Example configuration: ```yaml @@ -4406,30 +3983,24 @@ stream_writers: --- ### `outbound_federation_restricted_to` -When using workers, you can restrict outbound federation traffic to only go through a -specific subset of workers. Any worker specified here must also be in the -[`instance_map`](#instance_map). -[`worker_replication_secret`](#worker_replication_secret) must also be configured to -authorize inter-worker communication. +*(array)* When using workers, you can restrict outbound federation traffic to only go through a specific subset of workers. Any worker specified here must also be in the [`instance_map`](#instance_map). [`worker_replication_secret`](#worker_replication_secret) must also be configured to authorize inter-worker communication. -```yaml -outbound_federation_restricted_to: - - federation_sender1 - - federation_sender2 -``` - -Also see the [worker -documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) -for more info. +Also see the [worker documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) for more info. _Added in Synapse 1.89.0._ +Defaults to `[]`. + +Example configuration: +```yaml +outbound_federation_restricted_to: +- federation_sender1 +- federation_sender2 +``` --- ### `run_background_tasks_on` -The [worker](../../workers.md#background-tasks) that is used to run -background tasks (e.g. cleaning up expired data). If not provided this -defaults to the main process. +*(string|null)* The [worker](../../workers.md#background-tasks) that is used to run background tasks (e.g. cleaning up expired data). 
If not provided this defaults to the main process. Defaults to `null`. Example configuration: ```yaml @@ -4438,68 +4009,80 @@ run_background_tasks_on: worker1 --- ### `update_user_directory_from_worker` -The [worker](../../workers.md#updating-the-user-directory) that is used to -update the user directory. If not provided this defaults to the main process. +*(string|null)* The [worker](../../workers.md#updating-the-user-directory) that is used to update the user directory. If not provided this defaults to the main process. + +_Added in Synapse 1.59.0._ + +Defaults to `null`. Example configuration: ```yaml update_user_directory_from_worker: worker1 ``` - -_Added in Synapse 1.59.0._ - --- ### `notify_appservices_from_worker` -The [worker](../../workers.md#notifying-application-services) that is used to -send output traffic to Application Services. If not provided this defaults -to the main process. +*(string|null)* The [worker](../../workers.md#notifying-application-services) that is used to send output traffic to Application Services. If not provided this defaults to the main process. + +_Added in Synapse 1.59.0._ + +Defaults to `null`. Example configuration: ```yaml notify_appservices_from_worker: worker1 ``` - -_Added in Synapse 1.59.0._ - --- ### `media_instance_running_background_jobs` -The [worker](../../workers.md#synapseappmedia_repository) that is used to run -background tasks for media repository. If running multiple media repositories -you must configure a single instance to run the background tasks. If not provided -this defaults to the main process or your single `media_repository` worker. +*(string|null)* The [worker](../../workers.md#synapseappmedia_repository) that is used to run background tasks for media repository. If running multiple media repositories you must configure a single instance to run the background tasks. If not provided this defaults to the main process or your single `media_repository` worker. + +_Added in Synapse 1.16.0._ + +Defaults to `null`. Example configuration: ```yaml media_instance_running_background_jobs: worker1 ``` - -_Added in Synapse 1.16.0._ - --- ### `redis` -Configuration for Redis when using workers. This *must* be enabled when using workers. +*(object)* Configuration for Redis when using workers. This *must* be enabled when using workers. + +_Added in Synapse 1.78.0._ + +_Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_ + +_Changed in Synapse 1.85.0: Added path option to use a local Unix socket_ + +_Changed in Synapse 1.116.0: Added password\_path_ + This setting has the following sub-options: -* `enabled`: whether to use Redis support. Defaults to false. -* `host` and `port`: Optional host and port to use to connect to redis. Defaults to - localhost and 6379 -* `path`: The full path to a local Unix socket file. **If this is used, `host` and - `port` are ignored.** Defaults to `/tmp/redis.sock' -* `password`: Optional password if configured on the Redis instance. -* `dbid`: Optional redis dbid if needs to connect to specific redis logical db. -* `use_tls`: Whether to use tls connection. Defaults to false. -* `certificate_file`: Optional path to the certificate file -* `private_key_file`: Optional path to the private key file -* `ca_file`: Optional path to the CA certificate file. Use this one or: -* `ca_path`: Optional path to the folder containing the CA certificate file - _Added in Synapse 1.78.0._ +* `enabled` (boolean): Whether to use Redis support. Defaults to `false`. 
+ +* `host` (string): Optional host to use to connect to Redis. Defaults to `"localhost"`. + +* `port` (integer): Optional port to use to connect to Redis. Defaults to `6379`. + +* `path` (string): The full path to a local Unix socket file. **If this is used, `host` and `port` are ignored.** Defaults to `"/tmp/redis.sock"`. + +* `password` (string|null): Optional password if configured on the Redis instance. Defaults to `null`. + +* `password_path` (string|null): Alternative to `password`, reading the password from an external file. The file should be a plain text file, containing only the password. Synapse reads the password from the given file once at startup. Defaults to `null`. - _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file, private\_key\_file, ca\_file and ca\_path attributes_ +* `dbid` (string|null): Optional Redis dbid, if you need to connect to a specific Redis logical db. Defaults to `null`. - _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_ +* `use_tls` (boolean): Whether to use a TLS connection. Defaults to `false`. + +* `certificate_file` (string|null): Optional path to the certificate file. Defaults to `null`. + +* `private_key_file` (string|null): Optional path to the private key file. Defaults to `null`. + +* `ca_file` (string|null): Optional path to the CA certificate file. Use this one or `ca_path`. Defaults to `null`. + +* `ca_path` (string|null): Optional path to the folder containing the CA certificate file. Use this one or `ca_file`. Defaults to `null`. Example configuration: ```yaml @@ -4507,31 +4090,26 @@ redis: enabled: true host: localhost port: 6379 - password: <secret_password> + password_path: <path_to_the_password_file> dbid: <dbid> - #use_tls: True - #certificate_file: <path_to_the_certificate_file> - #private_key_file: <path_to_the_private_key_file> - #ca_file: <path_to_the_ca_certificate_file> ``` --- ## Individual worker configuration -These options configure an individual worker, in its worker configuration file. -They should be not be provided when configuring the main process. -Note also the configuration above for -[coordinating a cluster of workers](#coordinating-workers). +These options configure an individual worker, in its worker configuration file. They should not be provided when configuring the main process. + +Note also the configuration above for [coordinating a cluster of workers](#coordinating-workers). For guidance on setting up workers, see the [worker documentation](../../workers.md). --- ### `worker_app` -The type of worker. The currently available worker applications are listed -in [worker documentation](../../workers.md#available-worker-applications). +*(string)* The type of worker. The currently available worker applications are listed in [worker documentation](../../workers.md#available-worker-applications). + +The most common worker is the [`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker). -The most common worker is the -[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker). +There is no default for this option. Example configuration: ```yaml @@ -4540,9 +4118,7 @@ worker_app: synapse.app.generic_worker --- ### `worker_name` -A unique name for the worker. The worker needs a name to be addressed in -further parameters and identification in log files. We strongly recommend -giving each worker a unique `worker_name`. +*(string)* A unique name for the worker. The worker needs a name to be addressed in further parameters and identification in log files.
We strongly recommend giving each worker a unique `worker_name`. There is no default for this option. Example configuration: ```yaml @@ -4551,46 +4127,45 @@ worker_name: generic_worker1 --- ### `worker_listeners` -A worker can handle HTTP requests. To do so, a `worker_listeners` option -must be declared, in the same way as the [`listeners` option](#listeners) -in the shared config. +*(array)* A worker can handle HTTP requests. To do so, a `worker_listeners` option must be declared, in the same way as the [`listeners` option](#listeners) in the shared config. -Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map) - will need to include a `replication` listener here, in order to accept internal HTTP -requests from other workers. +Workers declared in [`stream_writers`](#stream_writers) and [`instance_map`](#instance_map) will need to include a `replication` listener here, in order to accept internal HTTP requests from other workers. -Example configuration: +Example #2 is using UNIX sockets with a `replication` listener. + +Defaults to `[]`. + +Example configurations: ```yaml worker_listeners: - - type: http - port: 8083 - resources: - - names: [client, federation] +- type: http + port: 8083 + resources: + - names: + - client + - federation ``` -Example configuration(#2, using UNIX sockets with a `replication` listener): + ```yaml worker_listeners: - - type: http - path: /run/synapse/worker_replication.sock - resources: - - names: [replication] - - type: http - path: /run/synapse/worker_public.sock - resources: - - names: [client, federation] +- type: http + path: /run/synapse/worker_replication.sock + resources: + - names: + - replication +- type: http + path: /run/synapse/worker_public.sock + resources: + - names: + - client + - federation ``` --- ### `worker_manhole` -A worker may have a listener for [`manhole`](../../manhole.md). -It allows server administrators to access a Python shell on the worker. - -Example configuration: -```yaml -worker_manhole: 9000 -``` +*(integer|null)* A worker may have a listener for [`manhole`](../../manhole.md). It allows server administrators to access a Python shell on the worker. -This is a short form for: +The example below is a short form for: ```yaml worker_listeners: - port: 9000 @@ -4600,14 +4175,16 @@ worker_listeners: It also needs an additional [`manhole_settings`](#manhole_settings) configuration. +Defaults to `null`. + +Example configuration: +```yaml +worker_manhole: 9000 +``` --- ### `worker_daemonize` -Specifies whether the worker should be started as a daemon process. -If Synapse is being managed by [systemd](../../systemd-with-workers/), this option -must be omitted or set to `false`. - -Defaults to `false`. +*(boolean)* Specifies whether the worker should be started as a daemon process. If Synapse is being managed by [systemd](../../systemd-with-workers/), this option must be omitted or set to `false`. Defaults to `false`. Example configuration: ```yaml @@ -4616,15 +4193,14 @@ worker_daemonize: true --- ### `worker_pid_file` -When running a worker as a daemon, we need a place to store the -[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker. -This option defines the location of that "pid file". +*(string|null)* When running a worker as a daemon, we need a place to store the [PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker. This option defines the location of that "pid file". -This option is required if `worker_daemonize` is `true` and ignored -otherwise.
It has no default. +This option is required if `worker_daemonize` is `true` and ignored otherwise. See also the [`pid_file` option](#pid_file) for the main Synapse process. +Defaults to `null`. + Example configuration: ```yaml worker_pid_file: DATADIR/generic_worker1.pid @@ -4632,9 +4208,7 @@ worker_pid_file: DATADIR/generic_worker1.pid --- ### `worker_log_config` -This option specifies a yaml python logging config file as described -[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema). -See also the [`log_config` option](#log_config) option for the main Synapse process. +*(string|null)* This option specifies a YAML Python logging config file as described [here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema). See also the [`log_config` option](#log_config) for the main Synapse process. Defaults to `null`. Example configuration: ```yaml worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml ``` --- ## Background Updates + Configuration settings related to background updates. --- ### `background_updates` -Background updates are database updates that are run in the background in batches. -The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to -sleep can all be configured. This is helpful to speed up or slow down the updates. +*(object)* Background updates are database updates that are run in the background in batches. The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to sleep can all be configured. This is helpful to speed up or slow down the updates. + This setting has the following sub-options: -* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100. - Set a different time to change the default. -* `sleep_enabled`: Whether to sleep between updates. Defaults to true. Set to false to change the default. -* `sleep_duration_ms`: If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. - Set a duration to change the default. -* `min_batch_size`: Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. - Set a size to change the default. -* `default_batch_size`: The batch size to use for the first iteration of a new background update. The default is 100. - Set a size to change the default. + +* `background_update_duration_ms` (integer): How long in milliseconds to run a batch of background updates for. Defaults to `100`. + +* `sleep_enabled` (boolean): Whether to sleep between updates. Defaults to `true`. + +* `sleep_duration_ms` (integer): If sleeping between updates, how long in milliseconds to sleep for. Defaults to `1000`. + +* `min_batch_size` (integer): Minimum size a batch of background updates can be. Must be greater than 0. Defaults to `1`. + +* `default_batch_size` (integer): The batch size to use for the first iteration of a new background update. Defaults to `100`. Example configuration: ```yaml background_updates: - background_update_duration_ms: 500 - sleep_enabled: false - sleep_duration_ms: 300 - min_batch_size: 10 - default_batch_size: 50 + background_update_duration_ms: 500 + sleep_enabled: false + sleep_duration_ms: 300 + min_batch_size: 10 + default_batch_size: 50 ``` --- ## Auto Accept Invites + Configuration settings related to automatically accepting invites.
--- ### `auto_accept_invites` -Automatically accepting invites controls whether users are presented with an invite request or if they -are instead automatically joined to a room when receiving an invite. Set the `enabled` sub-option to true to -enable auto-accepting invites. Defaults to false. +*(object)* Automatically accepting invites controls whether users are presented with an invite request or if they are instead automatically joined to a room when receiving an invite. Set the `enabled` sub-option to true to enable auto-accepting invites. + +NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed. The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join events could be generated from a single invite. + This setting has the following sub-options: -* `enabled`: Whether to run the auto-accept invites logic. Defaults to false. -* `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only - for direct messages. Defaults to false. -* `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false. -* `worker_to_run_on`: Which worker to run this module on. This must match - the "worker_name". If not set or `null`, invites will be accepted on the - main process. -NOTE: Care should be taken not to enable this setting if the `synapse_auto_accept_invite` module is enabled and installed. -The two modules will compete to perform the same task and may result in undesired behaviour. For example, multiple join -events could be generated from a single invite. +* `enabled` (boolean): Whether to run the auto-accept invites logic. Defaults to `false`. + +* `only_for_direct_messages` (boolean): Whether invites should be automatically accepted for all room types, or only for direct messages. Defaults to `false`. + +* `only_from_local_users` (boolean): Whether to only automatically accept invites from users on this homeserver. Defaults to `false`. + +* `worker_to_run_on` (string|null): Which worker to run this module on. This must match the "worker_name". If not set or `null`, invites will be accepted on the main process. Defaults to `null`. Example configuration: ```yaml auto_accept_invites: - enabled: true - only_for_direct_messages: true - only_from_local_users: true - worker_to_run_on: "worker_1" + enabled: true + only_for_direct_messages: true + only_from_local_users: true + worker_to_run_on: worker_1 ``` diff --git a/docs/usage/configuration/user_authentication/README.md b/docs/usage/configuration/user_authentication/README.md
index 087ae053cf..644ca66445 100644 --- a/docs/usage/configuration/user_authentication/README.md +++ b/docs/usage/configuration/user_authentication/README.md
@@ -7,7 +7,7 @@ Included in Synapse is support for authenticating users via: * A username and password. * An email address and password. -* Single Sign-On through the SAML, Open ID Connect or CAS protocols. +* Single Sign-On through the OpenID Connect protocol. * JSON Web Tokens. * An administrator's shared secret. diff --git a/docs/usage/configuration/user_authentication/single_sign_on/README.md b/docs/usage/configuration/user_authentication/single_sign_on/README.md
index b94aad92cf..b6e0b080b5 100644 --- a/docs/usage/configuration/user_authentication/single_sign_on/README.md +++ b/docs/usage/configuration/user_authentication/single_sign_on/README.md
@@ -1,5 +1,7 @@ # Single Sign-On -Synapse supports single sign-on through the SAML, Open ID Connect or CAS protocols. +Synapse supports single sign-on through the OpenID Connect protocol. LDAP and other login methods are supported through first and third-party password -auth provider modules. \ No newline at end of file +auth provider modules. + +Note that this patchset removes SAML and CAS protocol support. \ No newline at end of file diff --git a/docs/usage/configuration/user_authentication/single_sign_on/cas.md b/docs/usage/configuration/user_authentication/single_sign_on/cas.md deleted file mode 100644
index 899face876..0000000000 --- a/docs/usage/configuration/user_authentication/single_sign_on/cas.md +++ /dev/null
@@ -1,8 +0,0 @@ -# CAS - -Synapse supports authenticating users via the [Central Authentication -Service protocol](https://en.wikipedia.org/wiki/Central_Authentication_Service) -(CAS) natively. - -Please see the [cas_config](../../../configuration/config_documentation.md#cas_config) and [sso](../../../configuration/config_documentation.md#sso) -sections of the configuration manual for more details. \ No newline at end of file diff --git a/docs/usage/configuration/user_authentication/single_sign_on/saml.md b/docs/usage/configuration/user_authentication/single_sign_on/saml.md deleted file mode 100644
index 2b6f052cc1..0000000000 --- a/docs/usage/configuration/user_authentication/single_sign_on/saml.md +++ /dev/null
@@ -1,8 +0,0 @@ -# SAML - -Synapse supports authenticating users via the [Security Assertion -Markup Language](https://en.wikipedia.org/wiki/Security_Assertion_Markup_Language) -(SAML) protocol natively. - -Please see the `saml2_config` and `sso` sections of the [Synapse configuration -file](../../../configuration/homeserver_sample_config.md) for more details. \ No newline at end of file diff --git a/docs/workers.md b/docs/workers.md
index fbf539fa7e..5397f36280 100644 --- a/docs/workers.md +++ b/docs/workers.md
@@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou You can start the main Synapse process with Poetry by running the following command: ```console -poetry run synapse_homeserver --config-file [your homeserver.yaml] +poetry run synapse_homeserver --config-path [your homeserver.yaml] ``` For worker setups, you can run the following command ```console -poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml] +poetry run synapse_worker --config-path [your homeserver.yaml] --config-path [your worker.yaml] ``` ## Available worker applications @@ -200,6 +200,7 @@ information. ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ # Federation requests + ^/_matrix/federation/v1/version$ ^/_matrix/federation/v1/event/ ^/_matrix/federation/v1/state/ ^/_matrix/federation/v1/state_ids/ @@ -249,13 +250,14 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$ ^/_matrix/client/(r0|v3|unstable)/capabilities$ ^/_matrix/client/(r0|v3|unstable)/notifications$ + ^/_synapse/admin/v1/rooms/ # Encryption requests ^/_matrix/client/(r0|v3|unstable)/keys/query$ ^/_matrix/client/(r0|v3|unstable)/keys/changes$ ^/_matrix/client/(r0|v3|unstable)/keys/claim$ ^/_matrix/client/(r0|v3|unstable)/room_keys/ - ^/_matrix/client/(r0|v3|unstable)/keys/upload/ + ^/_matrix/client/(r0|v3|unstable)/keys/upload$ # Registration/login requests ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ @@ -273,23 +275,21 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/knock/ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ - # Account data requests - ^/_matrix/client/(r0|v3|unstable)/.*/tags - ^/_matrix/client/(r0|v3|unstable)/.*/account_data - - # Receipts requests - ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt - ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers - - # Presence requests - ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ - # User directory search requests ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ Additionally, the following REST endpoints can be handled for GET requests: ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ + ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events + ^/_matrix/client/(api/v1|r0|v3|unstable)/devices/ + + # Account data requests + ^/_matrix/client/(r0|v3|unstable)/.*/tags + ^/_matrix/client/(r0|v3|unstable)/.*/account_data + + # Presence requests + ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ Pagination requests can also be handled, but all requests for a given room must be routed to the same instance. Additionally, care must be taken to @@ -312,17 +312,20 @@ using): # OpenID Connect requests. ^/_synapse/client/oidc/callback$ - # SAML requests. - ^/_synapse/client/saml2/authn_response$ - - # CAS requests. - ^/_matrix/client/(api/v1|r0|v3|unstable)/login/cas/ticket$ - Ensure that all SSO logins go to a single process. For multiple workers not handling the SSO endpoints properly, see [#7530](https://github.com/matrix-org/synapse/issues/7530) and [#9427](https://github.com/matrix-org/synapse/issues/9427). 
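To illustrate how the path regexes above can drive routing decisions, here is a minimal sketch (the `route` function and the small pattern subset are hypothetical, not part of Synapse or of any particular reverse proxy):

```python
import re

# A small, hypothetical subset of the worker-capable endpoints listed above.
WORKER_PATTERNS = [
    re.compile(p)
    for p in (
        r"^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$",
        r"^/_matrix/federation/v1/version$",
        r"^/_matrix/client/(r0|v3|unstable)/keys/upload$",
    )
]


def route(path: str) -> str:
    """Return which process should handle the given request path."""
    if any(pattern.match(path) for pattern in WORKER_PATTERNS):
        return "generic_worker"
    return "main"


assert route("/_matrix/federation/v1/version") == "generic_worker"
assert route("/_matrix/client/v3/rooms/!a:example.com/initialSync") == "generic_worker"
assert route("/_synapse/client/oidc/callback") == "main"  # SSO stays on a single process
```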
+Additionally, when MSC3861 is enabled (`experimental_features.msc3861.enabled` +set to `true`), the following endpoints can be handled by the worker: + + ^/_synapse/admin/v2/users/[^/]+$ + ^/_synapse/admin/v1/username_available$ + ^/_synapse/admin/v1/users/[^/]+/_allow_cross_signing_replacement_without_uia$ + # Only the GET method: + ^/_synapse/admin/v1/users/[^/]+/devices$ + Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners) with `client` and `federation` `resources` must be configured in the [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) diff --git a/flake.lock b/flake.lock
index 9b360fa33e..4e2f01153b 100644 --- a/flake.lock +++ b/flake.lock
@@ -39,33 +39,12 @@ } }, "flake-utils": { - "inputs": { - "systems": "systems" - }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1667395993, + "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1681202837, - "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "cfacdce06f30d2b68473a46042957675eebb3401", + "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", "type": "github" }, "original": { @@ -170,43 +149,43 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "lastModified": 1678872516, + "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-22.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1690535733, - "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=", + "lastModified": 1748217807, + "narHash": "sha256-P3u2PXxMlo49PutQLnk2PhI/imC69hFl1yY4aT5Nax8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a", + "rev": "3108eaa516ae22c2360928589731a4f1581526ef", "type": "github" }, "original": { "owner": "NixOS", - "ref": "master", + "ref": "nixpkgs-unstable", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_3": { "locked": { - "lastModified": 1681358109, - "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=", + "lastModified": 1744536153, + "narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9", + "rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11", "type": "github" }, "original": { @@ -231,11 +210,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "lastModified": 1686050334, + "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc", "type": "github" }, "original": { @@ -249,20 +228,19 @@ "devenv": "devenv", "nixpkgs": "nixpkgs_2", "rust-overlay": "rust-overlay", - "systems": "systems_3" + "systems": "systems" } }, "rust-overlay": { "inputs": { - "flake-utils": "flake-utils_2", "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1693966243, - "narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=", + "lastModified": 1748313401, + "narHash": "sha256-x5UuDKP2Ui/TresAngUo9U4Ss9xfOmN8dAXU8OrkZmA=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1", + "rev": "9c8ea175cf9af29edbcff121512e44092a8f37e4", "type": "github" }, "original": { @@ -285,36 +263,6 @@ "repo": "default", "type": "github" } - }, - "systems_2": { - "locked": { - "lastModified": 1681028828, - "narHash": 
"sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - }, - "systems_3": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix
index 31f2832939..e33b233ece 100644 --- a/flake.nix +++ b/flake.nix
@@ -3,13 +3,13 @@ # (https://github.com/matrix-org/complement) Matrix homeserver test suites are also # installed automatically. # -# You must have already installed Nix (https://nixos.org) on your system to use this. -# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not -# directly supported, but Nix can be installed inside of WSL2 or even Docker +# You must have already installed Nix (https://nixos.org/download/) on your system to use this. +# Nix can be installed on any Linux distribution or MacOS; NixOS is not required. +# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker # containers. Please refer to https://nixos.org/download for details. # # You must also enable support for flakes in Nix. See the following for how to -# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes +# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager # # Be warned: you'll need over 3.75 GB of free space to download all the dependencies. # @@ -20,7 +20,7 @@ # locally from "services", such as PostgreSQL and Redis. # # You should now be dropped into a new shell with all programs and dependencies -# availabile to you! +# available to you! # # You can start up pre-configured local Synapse, PostgreSQL and Redis instances by # running: `devenv up`. To stop them, use Ctrl-C. @@ -39,9 +39,9 @@ { inputs = { - # Use the master/unstable branch of nixpkgs. Used to fetch the latest + # Use the rolling/unstable branch of nixpkgs. Used to fetch the latest # available versions of packages. - nixpkgs.url = "github:NixOS/nixpkgs/master"; + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS). systems.url = "github:nix-systems/default"; # A development environment manager built on Nix. See https://devenv.sh. @@ -50,7 +50,7 @@ rust-overlay.url = "github:oxalica/rust-overlay"; }; - outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs: + outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs: let forEachSystem = nixpkgs.lib.genAttrs (import systems); in { @@ -82,7 +82,7 @@ # # NOTE: We currently need to set the Rust version unnecessarily high # in order to work around https://github.com/matrix-org/synapse/issues/15939 - (rust-bin.stable."1.71.1".default.override { + (rust-bin.stable."1.87.0".default.override { # Additionally install the "rust-src" extension to allow diving into the # Rust source code in an IDE (rust-analyzer will also make use of it). extensions = [ "rust-src" ]; @@ -118,6 +118,8 @@ # For releasing Synapse debian-devscripts # (`dch` for manipulating the Debian changelog) libnotify # (the release script uses `notify-send` to tell you when CI jobs are done) + + postgresql.pg_config ]; # Install Python and manage a virtualenv with Poetry. @@ -126,7 +128,7 @@ # Automatically activate the poetry virtualenv upon entering the shell. languages.python.poetry.activate.enable = true; # Install all extra Python dependencies; this is needed to run the unit - # tests and utilitise all Synapse features. + # tests and utilise all Synapse features. languages.python.poetry.install.arguments = ["--extras all"]; # Install the 'matrix-synapse' package from the local checkout. languages.python.poetry.install.installRootPackage = true; @@ -140,6 +142,9 @@ # force compiling those binaries locally instead.
env.POETRY_INSTALLER_NO_BINARY = "ruff"; + # Required to make git work + env.CARGO_NET_GIT_FETCH_WITH_CLI = "true"; + # Install dependencies for the additional programming languages # involved with Synapse development. # @@ -160,11 +165,14 @@ services.postgres.initialDatabases = [ { name = "synapse"; } ]; + + services.postgres.port = 5433; + # Create a postgres user called 'synapse_user' which has ownership # over the 'synapse' database. services.postgres.initialScript = '' - CREATE USER synapse_user; - ALTER DATABASE synapse OWNER TO synapse_user; + CREATE USER synapse_user; + ALTER DATABASE synapse OWNER TO synapse_user; ''; # Redis is needed in order to run Synapse in worker mode. @@ -205,7 +213,7 @@ # corresponding Nix packages on https://search.nixos.org/packages. # # This was done until `./install-deps.pl --dryrun` produced no output. - env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [ + env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [ DBI ClassMethodModifiers CryptEd25519 diff --git a/mypy.ini b/mypy.ini
index 3fca15c01b..0a0e33361c 100644 --- a/mypy.ini +++ b/mypy.ini
@@ -26,7 +26,7 @@ strict_equality = True # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). -python_version = 3.8 +python_version = 3.9 files = docker/, @@ -87,9 +87,6 @@ ignore_missing_imports = True [mypy-rust_python_jaeger_reporter.*] ignore_missing_imports = True -[mypy-saml2.*] -ignore_missing_imports = True - [mypy-srvlookup.*] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock
index 278bd6cb6e..cbed01b564 100644 --- a/poetry.lock +++ b/poetry.lock
@@ -1,47 +1,48 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-types" -version = "0.5.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["main", "dev"] files = [ - {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, - {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle ; 
platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.9\" and python_version < \"3.13\""] [[package]] name = "authlib" -version = "1.3.1" +version = "1.5.2" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" files = [ - {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"}, - {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"}, + {file = "authlib-1.5.2-py2.py3-none-any.whl", hash = "sha256:8804dd4402ac5e4a0435ac49e0b6e19e395357cfa632a3f624dcb4f6df13b4b1"}, + {file = "authlib-1.5.2.tar.gz", hash = "sha256:fe85ec7e50c5f86f1e2603518bb3b4f632985eb4a355e52256530790e326c512"}, ] [package.dependencies] @@ -53,6 +54,7 @@ version = "22.10.0" description = "Self-service finite-state machines for the programmer on the go." optional = false python-versions = "*" +groups = ["main"] files = [ {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"}, {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"}, @@ -67,38 +69,63 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"] [[package]] name = "bcrypt" -version = "4.2.0" +version = "4.3.0" description = "Modern password hashing for your software and your servers" optional = false -python-versions = ">=3.7" -files = [ - {file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"}, - {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"}, - {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"}, - {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"}, - {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"}, - {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"}, - {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"}, - {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"}, - {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"}, - {file = "bcrypt-4.2.0-cp37-abi3-win32.whl", 
hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"}, - {file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"}, - {file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"}, - {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"}, - {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"}, - {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"}, - {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"}, - {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"}, - {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"}, - {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"}, - {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"}, - {file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"}, - {file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"}, - {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"}, - {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"}, - {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"}, - {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"}, - {file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"}, +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = 
"sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd"}, + {file = "bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f"}, + {file = "bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d"}, + {file = "bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4"}, + {file = "bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669"}, + {file = "bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb"}, + {file = "bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef"}, + {file = "bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304"}, + {file = "bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51"}, + {file = "bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62"}, + {file = "bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe"}, + {file = "bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe"}, + {file = "bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505"}, + {file = "bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a"}, + {file = "bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c950d682f0952bafcceaf709761da0a32a942272fad381081b51096ffa46cea1"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:107d53b5c67e0bbc3f03ebf5b030e0403d24dda980f8e244795335ba7b4a027d"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:b693dbb82b3c27a1604a3dff5bfc5418a7e6a781bb795288141e5f80cf3a3492"}, + {file = "bcrypt-4.3.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:b6354d3760fcd31994a14c89659dee887f1351a06e5dac3c1142307172a79f90"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a839320bf27d474e52ef8cb16449bb2ce0ba03ca9f44daba6d93fa1d8828e48a"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:bdc6a24e754a555d7316fa4774e64c6c3997d27ed2d1964d55920c7c227bc4ce"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:55a935b8e9a1d2def0626c4269db3fcd26728cbff1e84f0341465c31c4ee56d8"}, + {file = "bcrypt-4.3.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:57967b7a28d855313a963aaea51bf6df89f833db4320da458e5b3c5ab6d4c938"}, + {file = "bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18"}, ] [package.extras] @@ -106,68 +133,22 @@ tests = ["pytest (>=3.2.1,!=3.3.0)"] typecheck = ["mypy"] [[package]] -name = "black" -version = "24.8.0" -description = "The uncompromising code formatter." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, - {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, - {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, - {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, - {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, - {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, - {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, - {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, - {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, - {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, - {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, - {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, - {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, - {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, - {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, - {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, - {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, - {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, - {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, - {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, - {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, - {file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} 
-typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - -[[package]] name = "bleach" -version = "6.1.0" +version = "6.2.0" description = "An easy safelist-based HTML-sanitizing tool." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main", "dev"] files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, ] [package.dependencies] -six = ">=1.9.0" webencodings = "*" [package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] +css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "canonicaljson" @@ -175,6 +156,7 @@ version = "2.0.0" description = "Canonical JSON" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "canonicaljson-2.0.0-py3-none-any.whl", hash = "sha256:c38a315de3b5a0532f1ec1f9153cd3d716abfc565a558d00a4835428a34fca5b"}, {file = "canonicaljson-2.0.0.tar.gz", hash = "sha256:e2fdaef1d7fadc5d9cb59bd3d0d41b064ddda697809ac4325dced721d12f113f"}, @@ -186,6 +168,7 @@ version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, @@ -193,75 +176,79 @@ files = [ [[package]] name = "cffi" -version = "1.15.1" +version = "1.17.1" description = "Foreign Function Interface for Python calling C code." 
optional = false -python-versions = "*" -files = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - 
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - 
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = 
"cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] [package.dependencies] @@ -273,6 +260,7 @@ version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" +groups = ["main", "dev"] files = [ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, @@ -353,13 +341,14 @@ files = [ [[package]] name = "click" -version = "8.1.7" +version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [package.dependencies] @@ -371,6 +360,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "platform_system == \"Windows\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -382,6 +373,7 @@ version = "0.9.1" description = "Python parser for the CommonMark Markdown spec" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, @@ -396,6 +388,7 @@ version = "15.1.0" description = "Symbolic constants in Python" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "constantly-15.1.0-py2.py3-none-any.whl", hash = "sha256:dd2fa9d6b1a51a83f0d7dd76293d734046aa176e384bf6e33b7e44880eb37c5d"}, {file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"}, @@ -403,43 +396,39 @@ files = [ [[package]] name = "cryptography" -version = "42.0.8" +version = "43.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" -files = [ - {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"}, - {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"}, - {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"}, - {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"}, - {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"}, - {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"}, - {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"}, - {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"}, - {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"}, - {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"}, - {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = 
"sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"}, - {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"}, - {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"}, - {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"}, +groups = ["main", "dev"] +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = 
"cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, ] [package.dependencies] @@ -452,7 +441,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -461,6 +450,8 @@ version = "0.7.1" description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +groups = ["main"] +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -472,6 +463,7 @@ version = "1.2.13" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] files = [ {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"}, @@ -481,7 +473,7 @@ files = [ wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"] +dev = ["PyTest (<5) ; python_version < \"3.6\"", "PyTest ; python_version >= \"3.6\"", "PyTest-Cov (<2.6) ; python_version < \"3.6\"", "PyTest-Cov ; python_version >= \"3.6\"", "bump2version (<1)", "configparser (<5) ; python_version < \"3\"", "importlib-metadata (<3) ; python_version < \"3\"", "importlib-resources (<4) ; python_version < \"3\"", "sphinx (<2)", "sphinxcontrib-websupport (<2) ; python_version < \"3\"", "tox", "zipp (<2) ; python_version < \"3\""] [[package]] name = "docutils" @@ -489,6 +481,7 @@ version = "0.19" description = "Docutils -- Python Documentation Utilities" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, @@ -500,6 +493,8 @@ version = "4.1.5" description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and lxml" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -514,6 +509,7 @@ version = "4.0.10" description = "Git Object Database" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, @@ -524,123 +520,141 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.43" +version = "3.1.44" description = "GitPython is a Python library used to interact with Git repositories" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ - {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, - {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, + {file = "GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110"}, + {file = "gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269"}, ] [package.dependencies] gitdb = ">=4.0.1,<5" [package.extras] -doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] 
-test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] +doc = ["sphinx (>=7.1.2,<7.2)", "sphinx-autodoc-typehints", "sphinx_rtd_theme"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock ; python_version < \"3.8\"", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions ; python_version < \"3.11\""] [[package]] name = "hiredis" -version = "3.0.0" +version = "3.1.0" description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" -files = [ - {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"}, - {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"}, - {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"}, - {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"}, - {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"}, - {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"}, - {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"}, - {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"}, - {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"}, - {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"}, - {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"}, - {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"}, - {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"}, - {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"}, - {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"}, - {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"}, - {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"}, - {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"}, - {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"}, - {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"}, - {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"}, - {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"}, - {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"}, - {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"}, - {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"}, - {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"}, - {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"}, - {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"}, - {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"}, - {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"}, - {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"}, - {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"}, - {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"}, - {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"}, - {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"}, - {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"}, - {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"}, - {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"}, - {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"}, - {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"}, - {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"}, - {file = 
"hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"}, - {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"}, - {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"}, - {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"}, - {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"}, - {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"}, - {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"}, - {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"}, - {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"}, - {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"}, - {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"}, - {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"}, - {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"}, - {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"}, - {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"}, - {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"}, - {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"}, - {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"}, - {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"}, - {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"}, - {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"}, - {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"}, - {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"}, - {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"}, - {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"}, - {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"}, - {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"}, - {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"}, - {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"}, - {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"}, - {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"}, - {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"}, - {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"}, - {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"}, - {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"}, - {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"}, - {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"}, - {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"}, - {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"}, - {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"}, - {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"}, - {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"}, - {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"}, - {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"}, - {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"}, - {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"}, - {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"}, - {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"}, - {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"}, - {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"}, - {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"}, - {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"}, - {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"}, +groups = ["main"] +markers = "extra == \"all\" or extra == \"redis\"" +files = [ + {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:2892db9db21f0cf7cc298d09f85d3e1f6dc4c4c24463ab67f79bc7a006d51867"}, + {file = "hiredis-3.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:93cfa6cc25ee2ceb0be81dc61eca9995160b9e16bdb7cca4a00607d57e998918"}, + {file = "hiredis-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2af62070aa9433802cae7be7364d5e82f76462c6a2ae34e53008b637aaa9a156"}, + {file = "hiredis-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:072c162260ebb1d892683107da22d0d5da7a1414739eae4e185cac22fe89627f"}, + {file = "hiredis-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6b232c43e89755ba332c2745ddab059c0bc1a0f01448a3a14d506f8448b1ce6"}, + {file = "hiredis-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb5316c9a65c4dde80796aa245b76011bab64eb84461a77b0a61c1bf2970bcc9"}, + {file = "hiredis-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e812a4e656bbd1c1c15c844b28259c49e26bb384837e44e8d2aa55412c91d2f7"}, + {file = "hiredis-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93a6c9230e5a5565847130c0e1005c8d3aa5ca681feb0ed542c4651323d32feb"}, + {file = "hiredis-3.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a5f65e89ce50a94d9490d5442a649c6116f53f216c8c14eb37cf9637956482b2"}, + {file = "hiredis-3.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9b2d6e33601c67c074c367fdccdd6033e642284e7a56adc130f18f724c378ca8"}, + {file = "hiredis-3.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:bad3b1e0c83849910f28c95953417106f539277035a4b515d1425f93947bc28f"}, + {file = "hiredis-3.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:9646de31f5994e6218311dcf216e971703dbf804c510fd3f84ddb9813c495824"}, + {file = "hiredis-3.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:59a9230f3aa38a33d09d8171400de202f575d7a38869e5ce2947829bca6fe359"}, + {file = "hiredis-3.1.0-cp310-cp310-win32.whl", hash = "sha256:0322d70f3328b97da14b6e98b18f0090a12ed8a8bf7ae20932e2eb9d1bb0aa2c"}, + {file = "hiredis-3.1.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:802474c18e878b3f9905e160a8b7df87d57885758083eda76c5978265acb41aa"}, + {file = "hiredis-3.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:c339ff4b4739b2a40da463763dd566129762f72926bca611ad9a457a9fe64abd"}, + {file = "hiredis-3.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:0ffa2552f704a45954627697a378fc2f559004e53055b82f00daf30bd4305330"}, + {file = "hiredis-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9acf7f0e7106f631cd618eb60ec9bbd6e43045addd5310f66ba1177209567e59"}, + {file = "hiredis-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea4f5ecf9dbea93c827486f59c606684c3496ea71c7ba9a8131932780696e61a"}, + {file = "hiredis-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39efab176fca3d5111075f6ba56cd864f18db46d858289d39360c5672e0e5c3e"}, + {file = "hiredis-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1110eae007f30e70a058d743e369c24430327cd01fd97d99519d6794a58dd587"}, + {file = "hiredis-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b390f63191bcccbb6044d4c118acdf4fa55f38e5658ac4cfd5a33a6f0c07659"}, + {file = "hiredis-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72a98ccc7b8ec9ce0100ecf59f45f05d2023606e8e3676b07a316d1c1c364072"}, + {file = "hiredis-3.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7c76e751fd1e2f221dec09cdc24040ee486886e943d5d7ffc256e8cf15c75e51"}, + {file = "hiredis-3.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7d3880f213b6f14e9c69ce52beffd1748eecc8669698c4782761887273b6e1bd"}, + {file = "hiredis-3.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:87c2b3fe7e7c96eba376506a76e11514e07e848f737b254e0973e4b5c3a491e9"}, + {file = "hiredis-3.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:d3cfb4089e96f8f8ee9554da93148a9261aa6612ad2cc202c1a494c7b712e31f"}, + {file = "hiredis-3.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4f12018e5c5f866a1c3f7017cb2d88e5c6f9440df2281e48865a2b6c40f247f4"}, + {file = "hiredis-3.1.0-cp311-cp311-win32.whl", hash = "sha256:107b66ce977bb2dff8f2239e68344360a75d05fed3d9fa0570ac4d3020ce2396"}, + {file = "hiredis-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8f1240bde53d3d1676f0aba61b3661560dc9a681cae24d9de33e650864029aa4"}, + {file = "hiredis-3.1.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:f7c7f89e0bc4246115754e2eda078a111282f6d6ecc6fb458557b724fe6f2aac"}, + {file = "hiredis-3.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:3dbf9163296fa45fbddcfc4c5900f10e9ddadda37117dbfb641e327e536b53e0"}, + {file = "hiredis-3.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:af46a4be0e82df470f68f35316fa16cd1e134d1c5092fc1082e1aad64cce716d"}, + {file = "hiredis-3.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc63d698c43aea500a84d8b083f830c03808b6cf3933ae4d35a27f0a3d881652"}, + {file = "hiredis-3.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:676b3d88674134bfaaf70dac181d1790b0f33b3187bfb9da9221e17e0e624f83"}, + {file = "hiredis-3.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aed10d9df1e2fb0011db2713ac64497462e9c2c0208b648c97569da772b959ca"}, + {file = "hiredis-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b5bd8adfe8742e331a94cccd782bffea251fa70d9a709e71f4510f50794d700"}, + {file = 
"hiredis-3.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9fc4e35b4afb0af6da55495dd0742ad32ab88150428a6ecdbb3085cbd60714e8"}, + {file = "hiredis-3.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89b83e76eb00ab0464e7b0752a3ffcb02626e742e9509bc141424a9c3202e8dc"}, + {file = "hiredis-3.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:98ebf08c907836b70a8f40e030df8ab6f174dc7f6fa765251d813e89f14069d8"}, + {file = "hiredis-3.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6c840b9cec086328f2ee2cfee0038b5d6bbb514bac7b5e579da6e346eaac056c"}, + {file = "hiredis-3.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c5c44e9fa6f4462d0330cb5f5d46fa652512fc86b41d4d1974d0356f263e9105"}, + {file = "hiredis-3.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e665b14ab50aa175cfa306fcb00fffd4e3ff02ceb36ca6a4df00b1246d6a73c4"}, + {file = "hiredis-3.1.0-cp312-cp312-win32.whl", hash = "sha256:bd33db977ac7af97e8d035ffadb163b00546be22e5f1297b2123f5f9bf0f8a21"}, + {file = "hiredis-3.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:37aed4aa9348600145e2d019c7be27855e503ecc4906c6976ff2f3b52e3d5d97"}, + {file = "hiredis-3.1.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:b87cddd8107487863fed6994de51e5594a0be267b0b19e213694e99cdd614623"}, + {file = "hiredis-3.1.0-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:d302deff8cb63a7feffc1844e4dafc8076e566bbf10c5aaaf0f4fe791b8a6bd0"}, + {file = "hiredis-3.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a018340c073cf88cb635b2bedff96619df2f666018c655e7911f46fa2c1c178"}, + {file = "hiredis-3.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1e8ba6414ac1ae536129e18c069f3eb497df5a74e136e3566471620a4fa5f95"}, + {file = "hiredis-3.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a86b9fef256c2beb162244791fdc025aa55f936d6358e86e2020e512fe2e4972"}, + {file = "hiredis-3.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7acdc68e29a446ad17aadaff19c981a36b3bd8c894c3520412c8a7ab1c3e0de7"}, + {file = "hiredis-3.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7e06baea05de57e1e7548064f505a6964e992674fe61b8f274afe2ac93b6371"}, + {file = "hiredis-3.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35b5fc061c8a0dbfdb440053280504d6aaa8d9726bd4d1d0e1cfcbbdf0d60b73"}, + {file = "hiredis-3.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c89d2dcb271d24c44f02264233b75d5db8c58831190fa92456a90b87fa17b748"}, + {file = "hiredis-3.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:aa36688c10a08f626fddcf68c2b1b91b0e90b070c26e550a4151a877f5c2d431"}, + {file = "hiredis-3.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f3982a9c16c1c4bc05a00b65d01ffb8d80ea1a7b6b533be2f1a769d3e989d2c0"}, + {file = "hiredis-3.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d1a6f889514ee2452300c9a06862fceedef22a2891f1c421a27b1ba52ef130b2"}, + {file = "hiredis-3.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8a45ff7915392a55d9386bb235ea1d1eb9960615f301979f02143fc20036b699"}, + {file = "hiredis-3.1.0-cp313-cp313-win32.whl", hash = "sha256:539e5bb725b62b76a5319a4e68fc7085f01349abc2316ef3df608ea0883c51d2"}, + {file = "hiredis-3.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9020fd7e58f489fda6a928c31355add0e665fd6b87b21954e675cf9943eafa32"}, + {file = 
"hiredis-3.1.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:b621a89fc29b3f4b01be6640ec81a6a94b5382bc78fecb876408d57a071e45aa"}, + {file = "hiredis-3.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:363e21fba55e1a26349dc9ca7da6b14332123879b6359bcee4a9acecb40ca33b"}, + {file = "hiredis-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c156156798729eadc9ab76ffee96c88b93cc1c3b493f4dd0a4341f53939194ee"}, + {file = "hiredis-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e38d8a325f9a6afac1b1c72d996d1add9e1b99696ce9410538ba5e9aa8fdba02"}, + {file = "hiredis-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3004ef7436feb7bfa61c0b36d422b8fb8c29aaa1a514c9405f0fdee5e9694dd3"}, + {file = "hiredis-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13f5b16f97d0bbd1c04ce367c49097d1214d60e11f9fee7ef2a9b54e0a6645c8"}, + {file = "hiredis-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:230dd0e77cb0f525f58a1306a7b4aaf078037fc5229110922332ca46f90821bb"}, + {file = "hiredis-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d968116caddd19d63120d1298e62b1bbc694db3360ed0d5df8c3a97edbc12552"}, + {file = "hiredis-3.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:511e36a6fa41d3efab3cd5cd70ac388ed825993b9e66fa3b0e47cf27a2f5ffee"}, + {file = "hiredis-3.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c5cd20804e3cb0d31e7d899d8dd091f569c33fe40d4bade670a067ab7d31c2ac"}, + {file = "hiredis-3.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:09e89e7d34cfe5ca8f7a869fca827d1af0afe8aaddb26b38c01058730edb79ad"}, + {file = "hiredis-3.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:570cbf31413c77fe5e7c157f2943ca4400493ddd9cf2184731cfcafc753becd7"}, + {file = "hiredis-3.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b9b4da8162cf289781732d6a5ba01d820c42c05943fcdb7de307d03639961db3"}, + {file = "hiredis-3.1.0-cp38-cp38-win32.whl", hash = "sha256:bc117a04bcb461d3bb1b2c5b417aee3442e1e8aa33ebc800481431f4c09fe0c5"}, + {file = "hiredis-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:34f3f5f0354db2d6797a6fb08d2c036a50af62a1d919d122c1c784304ef49347"}, + {file = "hiredis-3.1.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:a26fa888025badb5563f283cc19594c215a413e905729e59a5f7cf3f46d66c32"}, + {file = "hiredis-3.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:f50763cd819d4a52a47b5966d4bb47dee34b637c5fa6402509800eee6ecb61e6"}, + {file = "hiredis-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b6d1c9e1fce5e0a94072667ae2bf0142b89ebbb1917d3531184e060a43f3ee11"}, + {file = "hiredis-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e38d7a56b1a79ed0bbb9e6fe376d82e3f4dcc646ae47472f2c858e19a597c112"}, + {file = "hiredis-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ef5ad8b91530e4d10a68562b0a380ea22705a60e88cecee086d7c63a38564ce"}, + {file = "hiredis-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf3d2299b054e57a9f97ca08704c2843e44f29b57dc69b76a2592ecd212efe1a"}, + {file = "hiredis-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93811d60b0f73d0f049c86f4373a3833b4a38fce374ab151074d929553eb4304"}, + {file = "hiredis-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:18e703ff860c1d83abbcf57012b309ead02b56b60e85150c6c3bfb37cbb16ebf"}, + {file = "hiredis-3.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f9ea0678806c53d96758e74c6a898f9d506a2e3367a344757f768bef9e069366"}, + {file = "hiredis-3.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cf6844035abf47d52a1c3f4257255af3bf3b0f14d559b08eaa45885418c6c55d"}, + {file = "hiredis-3.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:7acf35cfa7ec9e1e7559c04e7095628f7d06049b5f24dcb58c1a55ef6dc689f8"}, + {file = "hiredis-3.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b885695dce7a39b1fd9a609ed9c4cf312e53df2ec028d5a78af7a891b5fbea4d"}, + {file = "hiredis-3.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1c22fa74ddd063396b19fe8445a1ae8b4190eff755d5750dda48e860a45b2ee7"}, + {file = "hiredis-3.1.0-cp39-cp39-win32.whl", hash = "sha256:0614e16339f1784df3bbd2800322e20b4127d3f3a3509f00a5562efddb2521aa"}, + {file = "hiredis-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:c2bc713ee73ab9de4a0d68b0ab0f29612342b63173714742437b977584adb2d8"}, + {file = "hiredis-3.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:07ab990d0835f36bf358dbb84db4541ac0a8f533128ec09af8f80a576eef2e88"}, + {file = "hiredis-3.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c54a88eb9d8ebc4e5eefaadbe2102a4f7499f9e413654172f40aefd25350959"}, + {file = "hiredis-3.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8095ef159896e5999a795b0f80e4d64281301a109e442a8d29cd750ca6bd8303"}, + {file = "hiredis-3.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f8ca13e2476ffd6d5be4763f5868133506ddcfa5ce54b4dac231ebdc19be6c6"}, + {file = "hiredis-3.1.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d25aa25c10f966d5415795ed271da84605044dbf436c054966cea5442451b3"}, + {file = "hiredis-3.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4180dc5f646b426e5fa1212e1348c167ee2a864b3a70d56579163d64a847dd1e"}, + {file = "hiredis-3.1.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d92144e0cd6e6e841a6ad343e9d58631626eeb4ac96b0322649379b5d4527447"}, + {file = "hiredis-3.1.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fcb91ba42903de637b94a1b64477f381f94ad82c0742c264f9245be76a7a3cbc"}, + {file = "hiredis-3.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ce71a797b5bc02c51da082428c00251ed6a7a67a03acbda5fbf9e8d028725f6"}, + {file = "hiredis-3.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e04c7feb9467e3170cd4d5bee381775783d81bbc45d6147c1c0ce3b50dc04f9"}, + {file = "hiredis-3.1.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a31806306a60f3565c04c964d6bee0e9d4a5120e1da589e41976b53972edf635"}, + {file = "hiredis-3.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bc51f594c2c0863ded6501642dc96701ca8bbea9ced4fa3af0a1aeda8aa634cb"}, + {file = "hiredis-3.1.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4663a319ab7d22c597b9421e5ea384fd583e044f2f1ca9a1b98d4fef8a0fea2f"}, + {file = "hiredis-3.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8060fa256862b0c3de64a73ab45bc1ccf381caca464f2647af9075b200828948"}, + {file = "hiredis-3.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e9445b7f117a9c8c8ccad97cb44daa55ddccff3cbc9079984eac56d982ba01f"}, + {file = 
"hiredis-3.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:732cf1c5cf1324f7bf3b6086976fe62a2ca98f0bf6316f31063c2c67be8797bc"}, + {file = "hiredis-3.1.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2102a94063d878c40df92f55199637a74f535e3a0b79ceba4a00538853a21be3"}, + {file = "hiredis-3.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d968dde69e3fe903bf9ef00667669dcf04a3e096e33aaf138775106ead138bc8"}, + {file = "hiredis-3.1.0.tar.gz", hash = "sha256:51d40ac3611091020d7dea6b05ed62cb152bff595fa4f931e7b6479d777acf7c"}, ] [[package]] @@ -649,6 +663,7 @@ version = "21.0.0" description = "A featureful, immutable, and correct URL for Python." optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] files = [ {file = "hyperlink-21.0.0-py2.py3-none-any.whl", hash = "sha256:e6b14c37ecb73e89c77d78cdb4c2cc8f3fb59a885c5b3f819ff4ed80f25af1b4"}, {file = "hyperlink-21.0.0.tar.gz", hash = "sha256:427af957daa58bc909471c6c40f74c5450fa123dd093fc53efd2e91d2705a56b"}, @@ -658,22 +673,47 @@ files = [ idna = ">=2.5" [[package]] +name = "id" +version = "1.5.0" +description = "A tool for generating OIDC identities" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "id-1.5.0-py3-none-any.whl", hash = "sha256:f1434e1cef91f2cbb8a4ec64663d5a23b9ed43ef44c4c957d02583d61714c658"}, + {file = "id-1.5.0.tar.gz", hash = "sha256:292cb8a49eacbbdbce97244f47a97b4c62540169c976552e497fd57df0734c1d"}, +] + +[package.dependencies] +requests = "*" + +[package.extras] +dev = ["build", "bump (>=1.3.2)", "id[lint,test]"] +lint = ["bandit", "interrogate", "mypy", "ruff (<0.8.2)", "types-requests"] +test = ["coverage[toml]", "pretend", "pytest", "pytest-cov"] + +[[package]] name = "idna" -version = "3.7" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" +groups = ["main", "dev"] files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "ijson" version = "3.3.0" description = "Iterative JSON parser with standard Python iterator interfaces" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7f7a5250599c366369fbf3bc4e176f5daa28eb6bc7d6130d02462ed335361675"}, {file = "ijson-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f87a7e52f79059f9c58f6886c262061065eb6f7554a587be7ed3aa63e6b71b34"}, @@ -773,13 +813,14 @@ files = [ [[package]] name = "immutabledict" -version = "4.2.0" +version = "4.2.1" description = "Immutable wrapper around dictionaries (a fork of frozendict)" optional = false -python-versions = ">=3.8,<4.0" +python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"}, - {file = 
"immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"}, + {file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"}, + {file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"}, ] [[package]] @@ -788,6 +829,8 @@ version = "6.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\" or python_version < \"3.10\"" files = [ {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, @@ -799,7 +842,7 @@ zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\"", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "importlib-resources" @@ -807,6 +850,8 @@ version = "5.12.0" description = "Read resources from Python packages" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.10\"" files = [ {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, @@ -817,7 +862,7 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8 ; python_version < \"3.12\"", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\""] [[package]] name = "incremental" @@ -825,6 +870,7 @@ version = "24.7.2" description = "A small library that versions your Python projects." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "incremental-24.7.2-py3-none-any.whl", hash = "sha256:8cb2c3431530bec48ad70513931a760f446ad6c25e8333ca5d95e24b0ed7b8fe"}, {file = "incremental-24.7.2.tar.gz", hash = "sha256:fb4f1d47ee60efe87d4f6f0ebb5f70b9760db2b2574c59c8e8912be4ebd464c9"}, @@ -838,25 +884,13 @@ tomli = {version = "*", markers = "python_version < \"3.11\""} scripts = ["click (>=6.0)"] [[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] name = "jaeger-client" version = "4.8.0" description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -876,6 +910,8 @@ version = "3.2.3" description = "Utility functions for Python class constructs" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"}, {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"}, @@ -886,7 +922,7 @@ more-itertools = "*" [package.extras] docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\""] [[package]] name = "jeepney" @@ -894,6 +930,8 @@ version = "0.8.0" description = "Low-level, pure Python DBus protocol wrapper." optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" files = [ {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, @@ -901,17 +939,18 @@ files = [ [package.extras] test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] -trio = ["async_generator", "trio"] +trio = ["async_generator ; python_version == \"3.6\"", "trio"] [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.6" description = "A very fast and expressive template engine." 
optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, ] [package.dependencies] @@ -926,6 +965,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -933,9 +973,7 @@ files = [ [package.dependencies] attrs = ">=22.2.0" -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} jsonschema-specifications = ">=2023.03.6" -pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} referencing = ">=0.28.4" rpds-py = ">=0.7.1" @@ -949,13 +987,13 @@ version = "2023.6.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jsonschema_specifications-2023.6.1-py3-none-any.whl", hash = "sha256:3d2b82663aff01815f744bb5c7887e2121a63399b49b104a3c96145474d091d7"}, {file = "jsonschema_specifications-2023.6.1.tar.gz", hash = "sha256:ca1c4dd059a9e7b34101cf5b3ab7ff1d18b139f35950d598d629837ef66e8f28"}, ] [package.dependencies] -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} referencing = ">=0.28.0" [[package]] @@ -964,6 +1002,8 @@ version = "23.13.1" description = "Store and access your passwords safely." 
optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"}, {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"}, @@ -971,7 +1011,6 @@ files = [ [package.dependencies] importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} "jaraco.classes" = "*" jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} @@ -980,7 +1019,7 @@ SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} [package.extras] completion = ["shtab"] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] -testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7) ; platform_python_implementation != \"PyPy\"", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8 ; python_version < \"3.12\"", "pytest-mypy (>=0.9.1) ; platform_python_implementation != \"PyPy\""] [[package]] name = "ldap3" @@ -988,6 +1027,8 @@ version = "2.9.1" description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library" optional = true python-versions = "*" +groups = ["main"] +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -998,161 +1039,153 @@ pyasn1 = ">=0.4.6" [[package]] name = "lxml" -version = "5.2.2" +version = "5.4.0" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
optional = true python-versions = ">=3.6" -files = [ - {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"}, - {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"}, - {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"}, - {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"}, - {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"}, - {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"}, - {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"}, - {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"}, - {file = 
"lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"}, - {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"}, - {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"}, - {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"}, - {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"}, - {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"}, - {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"}, - {file = 
"lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"}, - {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"}, - {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"}, - {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"}, - {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"}, - {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"}, - {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"}, - {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"}, - {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = 
"sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"}, - {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"}, - {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"}, - {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"}, - {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"}, - {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"}, - {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"}, - {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"}, - {file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"}, - {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"}, - {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"}, - {file = 
"lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"}, - {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"}, - {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"}, - {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"}, - {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"}, - {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"}, - {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"}, - {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = "sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"}, - {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"}, - {file = 
"lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"}, - {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"}, - {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"}, - {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"}, - {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"}, - {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"}, +groups = ["main"] +markers = "extra == \"all\" or extra == \"url-preview\"" +files = [ + {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e7bc6df34d42322c5289e37e9971d6ed114e3776b45fa879f734bded9d1fea9c"}, + {file = "lxml-5.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6854f8bd8a1536f8a1d9a3655e6354faa6406621cf857dc27b681b69860645c7"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:696ea9e87442467819ac22394ca36cb3d01848dad1be6fac3fb612d3bd5a12cf"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef80aeac414f33c24b3815ecd560cee272786c3adfa5f31316d8b349bfade28"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b9c2754cef6963f3408ab381ea55f47dabc6f78f4b8ebb0f0b25cf1ac1f7609"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7a62cc23d754bb449d63ff35334acc9f5c02e6dae830d78dab4dd12b78a524f4"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f82125bc7203c5ae8633a7d5d20bcfdff0ba33e436e4ab0abc026a53a8960b7"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:b67319b4aef1a6c56576ff544b67a2a6fbd7eaee485b241cabf53115e8908b8f"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:a8ef956fce64c8551221f395ba21d0724fed6b9b6242ca4f2f7beb4ce2f41997"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:0a01ce7d8479dce84fc03324e3b0c9c90b1ece9a9bb6a1b6c9025e7e4520e78c"}, + {file = "lxml-5.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:91505d3ddebf268bb1588eb0f63821f738d20e1e7f05d3c647a5ca900288760b"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3bcdde35d82ff385f4ede021df801b5c4a5bcdfb61ea87caabcebfc4945dc1b"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aea7c06667b987787c7d1f5e1dfcd70419b711cdb47d6b4bb4ad4b76777a0563"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:a7fb111eef4d05909b82152721a59c1b14d0f365e2be4c742a473c5d7372f4f5"}, + {file = "lxml-5.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:43d549b876ce64aa18b2328faff70f5877f8c6dede415f80a2f799d31644d776"}, + {file = "lxml-5.4.0-cp310-cp310-win32.whl", hash = "sha256:75133890e40d229d6c5837b0312abbe5bac1c342452cf0e12523477cd3aa21e7"}, + {file = "lxml-5.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:de5b4e1088523e2b6f730d0509a9a813355b7f5659d70eb4f319c76beea2e250"}, + {file = "lxml-5.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:98a3912194c079ef37e716ed228ae0dcb960992100461b704aea4e93af6b0bb9"}, + {file = "lxml-5.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ea0252b51d296a75f6118ed0d8696888e7403408ad42345d7dfd0d1e93309a7"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92b69441d1bd39f4940f9eadfa417a25862242ca2c396b406f9272ef09cdcaa"}, + {file = 
"lxml-5.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20e16c08254b9b6466526bc1828d9370ee6c0d60a4b64836bc3ac2917d1e16df"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7605c1c32c3d6e8c990dd28a0970a3cbbf1429d5b92279e37fda05fb0c92190e"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ecf4c4b83f1ab3d5a7ace10bafcb6f11df6156857a3c418244cef41ca9fa3e44"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cef4feae82709eed352cd7e97ae062ef6ae9c7b5dbe3663f104cd2c0e8d94ba"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:df53330a3bff250f10472ce96a9af28628ff1f4efc51ccba351a8820bca2a8ba"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:aefe1a7cb852fa61150fcb21a8c8fcea7b58c4cb11fbe59c97a0a4b31cae3c8c"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:ef5a7178fcc73b7d8c07229e89f8eb45b2908a9238eb90dcfc46571ccf0383b8"}, + {file = "lxml-5.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d2ed1b3cb9ff1c10e6e8b00941bb2e5bb568b307bfc6b17dffbbe8be5eecba86"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:72ac9762a9f8ce74c9eed4a4e74306f2f18613a6b71fa065495a67ac227b3056"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f5cb182f6396706dc6cc1896dd02b1c889d644c081b0cdec38747573db88a7d7"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3a3178b4873df8ef9457a4875703488eb1622632a9cee6d76464b60e90adbfcd"}, + {file = "lxml-5.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e094ec83694b59d263802ed03a8384594fcce477ce484b0cbcd0008a211ca751"}, + {file = "lxml-5.4.0-cp311-cp311-win32.whl", hash = "sha256:4329422de653cdb2b72afa39b0aa04252fca9071550044904b2e7036d9d97fe4"}, + {file = "lxml-5.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd3be6481ef54b8cfd0e1e953323b7aa9d9789b94842d0e5b142ef4bb7999539"}, + {file = "lxml-5.4.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b5aff6f3e818e6bdbbb38e5967520f174b18f539c2b9de867b1e7fde6f8d95a4"}, + {file = "lxml-5.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942a5d73f739ad7c452bf739a62a0f83e2578afd6b8e5406308731f4ce78b16d"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:460508a4b07364d6abf53acaa0a90b6d370fafde5693ef37602566613a9b0779"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529024ab3a505fed78fe3cc5ddc079464e709f6c892733e3f5842007cec8ac6e"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ca56ebc2c474e8f3d5761debfd9283b8b18c76c4fc0967b74aeafba1f5647f9"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a81e1196f0a5b4167a8dafe3a66aa67c4addac1b22dc47947abd5d5c7a3f24b5"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00b8686694423ddae324cf614e1b9659c2edb754de617703c3d29ff568448df5"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:c5681160758d3f6ac5b4fea370495c48aac0989d6a0f01bb9a72ad8ef5ab75c4"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:2dc191e60425ad70e75a68c9fd90ab284df64d9cd410ba8d2b641c0c45bc006e"}, + {file = 
"lxml-5.4.0-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:67f779374c6b9753ae0a0195a892a1c234ce8416e4448fe1e9f34746482070a7"}, + {file = "lxml-5.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:79d5bfa9c1b455336f52343130b2067164040604e41f6dc4d8313867ed540079"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3d3c30ba1c9b48c68489dc1829a6eede9873f52edca1dda900066542528d6b20"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1af80c6316ae68aded77e91cd9d80648f7dd40406cef73df841aa3c36f6907c8"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4d885698f5019abe0de3d352caf9466d5de2baded00a06ef3f1216c1a58ae78f"}, + {file = "lxml-5.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea53d51859b6c64e7c51d522c03cc2c48b9b5d6172126854cc7f01aa11f52bc"}, + {file = "lxml-5.4.0-cp312-cp312-win32.whl", hash = "sha256:d90b729fd2732df28130c064aac9bb8aff14ba20baa4aee7bd0795ff1187545f"}, + {file = "lxml-5.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1dc4ca99e89c335a7ed47d38964abcb36c5910790f9bd106f2a8fa2ee0b909d2"}, + {file = "lxml-5.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:773e27b62920199c6197130632c18fb7ead3257fce1ffb7d286912e56ddb79e0"}, + {file = "lxml-5.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9c671845de9699904b1e9df95acfe8dfc183f2310f163cdaa91a3535af95de"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9454b8d8200ec99a224df8854786262b1bd6461f4280064c807303c642c05e76"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cccd007d5c95279e529c146d095f1d39ac05139de26c098166c4beb9374b0f4d"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fce1294a0497edb034cb416ad3e77ecc89b313cff7adbee5334e4dc0d11f422"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24974f774f3a78ac12b95e3a20ef0931795ff04dbb16db81a90c37f589819551"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:497cab4d8254c2a90bf988f162ace2ddbfdd806fce3bda3f581b9d24c852e03c"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:e794f698ae4c5084414efea0f5cc9f4ac562ec02d66e1484ff822ef97c2cadff"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:2c62891b1ea3094bb12097822b3d44b93fc6c325f2043c4d2736a8ff09e65f60"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:142accb3e4d1edae4b392bd165a9abdee8a3c432a2cca193df995bc3886249c8"}, + {file = "lxml-5.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1a42b3a19346e5601d1b8296ff6ef3d76038058f311902edd574461e9c036982"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4291d3c409a17febf817259cb37bc62cb7eb398bcc95c1356947e2871911ae61"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4f5322cf38fe0e21c2d73901abf68e6329dc02a4994e483adbcf92b568a09a54"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0be91891bdb06ebe65122aa6bf3fc94489960cf7e03033c6f83a90863b23c58b"}, + {file = "lxml-5.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:15a665ad90054a3d4f397bc40f73948d48e36e4c09f9bcffc7d90c87410e478a"}, + {file = "lxml-5.4.0-cp313-cp313-win32.whl", hash = 
"sha256:d5663bc1b471c79f5c833cffbc9b87d7bf13f87e055a5c86c363ccd2348d7e82"}, + {file = "lxml-5.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:bcb7a1096b4b6b24ce1ac24d4942ad98f983cd3810f9711bcd0293f43a9d8b9f"}, + {file = "lxml-5.4.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7be701c24e7f843e6788353c055d806e8bd8466b52907bafe5d13ec6a6dbaecd"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb54f7c6bafaa808f27166569b1511fc42701a7713858dddc08afdde9746849e"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97dac543661e84a284502e0cf8a67b5c711b0ad5fb661d1bd505c02f8cf716d7"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:c70e93fba207106cb16bf852e421c37bbded92acd5964390aad07cb50d60f5cf"}, + {file = "lxml-5.4.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9c886b481aefdf818ad44846145f6eaf373a20d200b5ce1a5c8e1bc2d8745410"}, + {file = "lxml-5.4.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:fa0e294046de09acd6146be0ed6727d1f42ded4ce3ea1e9a19c11b6774eea27c"}, + {file = "lxml-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:61c7bbf432f09ee44b1ccaa24896d21075e533cd01477966a5ff5a71d88b2f56"}, + {file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"}, + {file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:891f7f991a68d20c75cb13c5c9142b2a3f9eb161f1f12a9489c82172d1f133c0"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ac7ba71f9561cd7d7b55e1ea5511543c0282e2b6450f122672a2694621d63b7e"}, + {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"}, + {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:ce31158630a6ac85bddd6b830cffd46085ff90498b397bd0a259f59d27a12188"}, + {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"}, + {file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"}, + {file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"}, + {file = "lxml-5.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eaf24066ad0b30917186420d51e2e3edf4b0e2ea68d8cd885b14dc8afdcf6556"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b31a3a77501d86d8ade128abb01082724c0dfd9524f542f2f07d693c9f1175f"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e108352e203c7afd0eb91d782582f00a0b16a948d204d4dec8565024fafeea5"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a11a96c3b3f7551c8a8109aa65e8594e551d5a84c76bf950da33d0fb6dfafab7"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:ca755eebf0d9e62d6cb013f1261e510317a41bf4650f22963474a663fdfe02aa"}, + {file = "lxml-5.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:4cd915c0fb1bed47b5e6d6edd424ac25856252f09120e3e8ba5154b6b921860e"}, + {file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:226046e386556a45ebc787871d6d2467b32c37ce76c2680f5c608e25823ffc84"}, + {file = "lxml-5.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b108134b9667bcd71236c5a02aad5ddd073e372fb5d48ea74853e009fe38acb6"}, + {file = "lxml-5.4.0-cp38-cp38-win32.whl", hash = "sha256:1320091caa89805df7dcb9e908add28166113dcd062590668514dbd510798c88"}, + {file = "lxml-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:073eb6dcdf1f587d9b88c8c93528b57eccda40209cf9be549d469b942b41d70b"}, + {file = "lxml-5.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bda3ea44c39eb74e2488297bb39d47186ed01342f0022c8ff407c250ac3f498e"}, + {file = "lxml-5.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9ceaf423b50ecfc23ca00b7f50b64baba85fb3fb91c53e2c9d00bc86150c7e40"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:664cdc733bc87449fe781dbb1f309090966c11cc0c0cd7b84af956a02a8a4729"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67ed8a40665b84d161bae3181aa2763beea3747f748bca5874b4af4d75998f87"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b4a3bd174cc9cdaa1afbc4620c049038b441d6ba07629d89a83b408e54c35cd"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:b0989737a3ba6cf2a16efb857fb0dfa20bc5c542737fddb6d893fde48be45433"}, + {file = "lxml-5.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:dc0af80267edc68adf85f2a5d9be1cdf062f973db6790c1d065e45025fa26140"}, + {file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:639978bccb04c42677db43c79bdaa23785dc7f9b83bfd87570da8207872f1ce5"}, + {file = "lxml-5.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a99d86351f9c15e4a901fc56404b485b1462039db59288b203f8c629260a142"}, + {file = "lxml-5.4.0-cp39-cp39-win32.whl", hash = "sha256:3e6d5557989cdc3ebb5302bbdc42b439733a841891762ded9514e74f60319ad6"}, + {file = "lxml-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8c9b7f16b63e65bbba889acb436a1034a82d34fa09752d754f88d708eca80e1"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1b717b00a71b901b4667226bba282dd462c42ccf618ade12f9ba3674e1fabc55"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27a9ded0f0b52098ff89dd4c418325b987feed2ea5cc86e8860b0f844285d740"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7ce10634113651d6f383aa712a194179dcd496bd8c41e191cec2099fa09de5"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53370c26500d22b45182f98847243efb518d268374a9570409d2e2276232fd37"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c6364038c519dffdbe07e3cf42e6a7f8b90c275d4d1617a69bb59734c1a2d571"}, + {file = "lxml-5.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b12cb6527599808ada9eb2cd6e0e7d3d8f13fe7bbb01c6311255a15ded4c7ab4"}, + {file = 
"lxml-5.4.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f11a1526ebd0dee85e7b1e39e39a0cc0d9d03fb527f56d8457f6df48a10dc0c"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b4afaf38bf79109bb060d9016fad014a9a48fb244e11b94f74ae366a64d252"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de6f6bb8a7840c7bf216fb83eec4e2f79f7325eca8858167b68708b929ab2172"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5cca36a194a4eb4e2ed6be36923d3cffd03dcdf477515dea687185506583d4c9"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b7c86884ad23d61b025989d99bfdd92a7351de956e01c61307cb87035960bcb1"}, + {file = "lxml-5.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:53d9469ab5460402c19553b56c3648746774ecd0681b1b27ea74d5d8a3ef5590"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:56dbdbab0551532bb26c19c914848d7251d73edb507c3079d6805fa8bba5b706"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14479c2ad1cb08b62bb941ba8e0e05938524ee3c3114644df905d2331c76cd57"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32697d2ea994e0db19c1df9e40275ffe84973e4232b5c274f47e7c1ec9763cdd"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:24f6df5f24fc3385f622c0c9d63fe34604893bc1a5bdbb2dbf5870f85f9a404a"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:151d6c40bc9db11e960619d2bf2ec5829f0aaffb10b41dcf6ad2ce0f3c0b2325"}, + {file = "lxml-5.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4025bf2884ac4370a3243c5aa8d66d3cb9e15d3ddd0af2d796eccc5f0244390e"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9459e6892f59ecea2e2584ee1058f5d8f629446eab52ba2305ae13a32a059530"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47fb24cc0f052f0576ea382872b3fc7e1f7e3028e53299ea751839418ade92a6"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50441c9de951a153c698b9b99992e806b71c1f36d14b154592580ff4a9d0d877"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ab339536aa798b1e17750733663d272038bf28069761d5be57cb4a9b0137b4f8"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9776af1aad5a4b4a1317242ee2bea51da54b2a7b7b48674be736d463c999f37d"}, + {file = "lxml-5.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:63e7968ff83da2eb6fdda967483a7a023aa497d85ad8f05c3ad9b1f2e8c84987"}, + {file = "lxml-5.4.0.tar.gz", hash = "sha256:d12832e1dbea4be280b22fd0ea7c9b87f0d8fc51ba06e92dc62d52f804f78ebd"}, ] [package.extras] cssselect = ["cssselect (>=0.7)"] -html-clean = ["lxml-html-clean"] +html-clean = ["lxml_html_clean"] html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=3.0.10)"] +source = ["Cython (>=3.0.11,<3.1.0)"] [[package]] name = "lxml-stubs" @@ -1160,6 +1193,7 @@ version = "0.5.1" description = "Type annotations for the lxml package" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "lxml-stubs-0.5.1.tar.gz", hash = "sha256:e0ec2aa1ce92d91278b719091ce4515c12adc1d564359dfaf81efa7d4feab79d"}, {file = "lxml_stubs-0.5.1-py3-none-any.whl", hash = 
"sha256:1f689e5dbc4b9247cb09ae820c7d34daeb1fdbd1db06123814b856dae7787272"}, @@ -1174,6 +1208,7 @@ version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, @@ -1194,61 +1229,73 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.1.2" +version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, - {file = 
"MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, - {file = 
"MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, - {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = 
"MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] [[package]] @@ -1257,6 +1304,7 @@ version = "1.3.0" description = "Common utilities for Synapse, Sydent and Sygnal" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"}, {file = "matrix_common-1.3.0.tar.gz", hash = "sha256:62e121cccd9f243417b57ec37a76dc44aeb198a7a5c67afd6b8275992ff2abd1"}, @@ -1275,6 +1323,8 @@ version = "0.3.0" description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1294,6 +1344,7 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -1305,6 +1356,8 @@ version = "9.1.0" description = "More routines for operating on iterables, beyond itertools" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, @@ -1312,112 +1365,128 @@ files = [ [[package]] name = "msgpack" -version = "1.0.8" +version = "1.1.0" description = "MessagePack serializer" optional = false python-versions = ">=3.8" -files = [ - {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:505fe3d03856ac7d215dbe005414bc28505d26f0c128906037e66d98c4e95868"}, - {file = "msgpack-1.0.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7842518a63a9f17107eb176320960ec095a8ee3b4420b5f688e24bf50c53c"}, - {file = "msgpack-1.0.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:376081f471a2ef24828b83a641a02c575d6103a3ad7fd7dade5486cad10ea659"}, - {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e390971d082dba073c05dbd56322427d3280b7cc8b53484c9377adfbae67dc2"}, - {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e073efcba9ea99db5acef3959efa45b52bc67b61b00823d2a1a6944bf45982"}, - {file = "msgpack-1.0.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82d92c773fbc6942a7a8b520d22c11cfc8fd83bba86116bfcf962c2f5c2ecdaa"}, - {file = 
"msgpack-1.0.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9ee32dcb8e531adae1f1ca568822e9b3a738369b3b686d1477cbc643c4a9c128"}, - {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e3aa7e51d738e0ec0afbed661261513b38b3014754c9459508399baf14ae0c9d"}, - {file = "msgpack-1.0.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69284049d07fce531c17404fcba2bb1df472bc2dcdac642ae71a2d079d950653"}, - {file = "msgpack-1.0.8-cp310-cp310-win32.whl", hash = "sha256:13577ec9e247f8741c84d06b9ece5f654920d8365a4b636ce0e44f15e07ec693"}, - {file = "msgpack-1.0.8-cp310-cp310-win_amd64.whl", hash = "sha256:e532dbd6ddfe13946de050d7474e3f5fb6ec774fbb1a188aaf469b08cf04189a"}, - {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9517004e21664f2b5a5fd6333b0731b9cf0817403a941b393d89a2f1dc2bd836"}, - {file = "msgpack-1.0.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d16a786905034e7e34098634b184a7d81f91d4c3d246edc6bd7aefb2fd8ea6ad"}, - {file = "msgpack-1.0.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2872993e209f7ed04d963e4b4fbae72d034844ec66bc4ca403329db2074377b"}, - {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c330eace3dd100bdb54b5653b966de7f51c26ec4a7d4e87132d9b4f738220ba"}, - {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b5c044f3eff2a6534768ccfd50425939e7a8b5cf9a7261c385de1e20dcfc85"}, - {file = "msgpack-1.0.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1876b0b653a808fcd50123b953af170c535027bf1d053b59790eebb0aeb38950"}, - {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dfe1f0f0ed5785c187144c46a292b8c34c1295c01da12e10ccddfc16def4448a"}, - {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3528807cbbb7f315bb81959d5961855e7ba52aa60a3097151cb21956fbc7502b"}, - {file = "msgpack-1.0.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e2f879ab92ce502a1e65fce390eab619774dda6a6ff719718069ac94084098ce"}, - {file = "msgpack-1.0.8-cp311-cp311-win32.whl", hash = "sha256:26ee97a8261e6e35885c2ecd2fd4a6d38252246f94a2aec23665a4e66d066305"}, - {file = "msgpack-1.0.8-cp311-cp311-win_amd64.whl", hash = "sha256:eadb9f826c138e6cf3c49d6f8de88225a3c0ab181a9b4ba792e006e5292d150e"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"}, - {file = 
"msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"}, - {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = "sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"}, - {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"}, - {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0ceea77719d45c839fd73abcb190b8390412a890df2f83fb8cf49b2a4b5c2f40"}, - {file = "msgpack-1.0.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ab0bbcd4d1f7b6991ee7c753655b481c50084294218de69365f8f1970d4c151"}, - {file = "msgpack-1.0.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1cce488457370ffd1f953846f82323cb6b2ad2190987cd4d70b2713e17268d24"}, - {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3923a1778f7e5ef31865893fdca12a8d7dc03a44b33e2a5f3295416314c09f5d"}, - {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22e47578b30a3e199ab067a4d43d790249b3c0587d9a771921f86250c8435db"}, - {file = "msgpack-1.0.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd739c9251d01e0279ce729e37b39d49a08c0420d3fee7f2a4968c0576678f77"}, - {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3420522057ebab1728b21ad473aa950026d07cb09da41103f8e597dfbfaeb13"}, - {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5845fdf5e5d5b78a49b826fcdc0eb2e2aa7191980e3d2cfd2a30303a74f212e2"}, - {file = "msgpack-1.0.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a0e76621f6e1f908ae52860bdcb58e1ca85231a9b0545e64509c931dd34275a"}, - {file = "msgpack-1.0.8-cp38-cp38-win32.whl", hash = "sha256:374a8e88ddab84b9ada695d255679fb99c53513c0a51778796fcf0944d6c789c"}, - {file = "msgpack-1.0.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3709997b228685fe53e8c433e2df9f0cdb5f4542bd5114ed17ac3c0129b0480"}, - {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f51bab98d52739c50c56658cc303f190785f9a2cd97b823357e7aeae54c8f68a"}, - {file = "msgpack-1.0.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:73ee792784d48aa338bba28063e19a27e8d989344f34aad14ea6e1b9bd83f596"}, - {file = "msgpack-1.0.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f9904e24646570539a8950400602d66d2b2c492b9010ea7e965025cb71d0c86d"}, - {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e75753aeda0ddc4c28dce4c32ba2f6ec30b1b02f6c0b14e547841ba5b24f753f"}, - {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dbf059fb4b7c240c873c1245ee112505be27497e90f7c6591261c7d3c3a8228"}, - {file = "msgpack-1.0.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4916727e31c28be8beaf11cf117d6f6f188dcc36daae4e851fee88646f5b6b18"}, - {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7938111ed1358f536daf311be244f34df7bf3cdedb3ed883787aca97778b28d8"}, - {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:493c5c5e44b06d6c9268ce21b302c9ca055c1fd3484c25ba41d34476c76ee746"}, - {file = "msgpack-1.0.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5fbb160554e319f7b22ecf530a80a3ff496d38e8e07ae763b9e82fadfe96f273"}, - {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"}, - {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"}, - {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"}, +groups = ["main"] +files = [ + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ad442d527a7e358a469faf43fda45aaf4ac3249c8310a82f0ccff9164e5dccd"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74bed8f63f8f14d75eec75cf3d04ad581da6b914001b474a5d3cd3372c8cc27d"}, + {file = "msgpack-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:914571a2a5b4e7606997e169f64ce53a8b1e06f2cf2c3a7273aa106236d43dd5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c921af52214dcbb75e6bdf6a661b23c3e6417f00c603dd2070bccb5c3ef499f5"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8ce0b22b890be5d252de90d0e0d119f363012027cf256185fc3d474c44b1b9e"}, + {file = "msgpack-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73322a6cc57fcee3c0c57c4463d828e9428275fb85a27aa2aa1a92fdc42afd7b"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e1f3c3d21f7cf67bcf2da8e494d30a75e4cf60041d98b3f79875afb5b96f3a3f"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64fc9068d701233effd61b19efb1485587560b66fe57b3e50d29c5d78e7fef68"}, + {file = "msgpack-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:42f754515e0f683f9c79210a5d1cad631ec3d06cea5172214d2176a42e67e19b"}, + {file = "msgpack-1.1.0-cp310-cp310-win32.whl", hash = "sha256:3df7e6b05571b3814361e8464f9304c42d2196808e0119f55d0d3e62cd5ea044"}, + {file = "msgpack-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:685ec345eefc757a7c8af44a3032734a739f8c45d1b0ac45efc5d8977aa4720f"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d364a55082fb2a7416f6c63ae383fbd903adb5a6cf78c5b96cc6316dc1cedc7"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79ec007767b9b56860e0372085f8504db5d06bd6a327a335449508bbee9648fa"}, + {file = "msgpack-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ad622bf7756d5a497d5b6836e7fc3752e2dd6f4c648e24b1803f6048596f701"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e59bca908d9ca0de3dc8684f21ebf9a690fe47b6be93236eb40b99af28b6ea6"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1da8f11a3dd397f0a32c76165cf0c4eb95b31013a94f6ecc0b280c05c91b59"}, + {file = "msgpack-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452aff037287acb1d70a804ffd022b21fa2bb7c46bee884dbc864cc9024128a0"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8da4bf6d54ceed70e8861f833f83ce0814a2b72102e890cbdfe4b34764cdd66e"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:41c991beebf175faf352fb940bf2af9ad1fb77fd25f38d9142053914947cdbf6"}, + {file = "msgpack-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:a52a1f3a5af7ba1c9ace055b659189f6c669cf3657095b50f9602af3a3ba0fe5"}, + {file = "msgpack-1.1.0-cp311-cp311-win32.whl", hash = "sha256:58638690ebd0a06427c5fe1a227bb6b8b9fdc2bd07701bec13c2335c82131a88"}, + {file = "msgpack-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd2906780f25c8ed5d7b323379f6138524ba793428db5d0e9d226d3fa6aa1788"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:071603e2f0771c45ad9bc65719291c568d4edf120b44eb36324dcb02a13bfddf"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f92a83b84e7c0749e3f12821949d79485971f087604178026085f60ce109330"}, + {file = "msgpack-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1964df7b81285d00a84da4e70cb1383f2e665e0f1f2a7027e683956d04b734"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59caf6a4ed0d164055ccff8fe31eddc0ebc07cf7326a2aaa0dbf7a4001cd823e"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0907e1a7119b337971a689153665764adc34e89175f9a34793307d9def08e6ca"}, + {file = "msgpack-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65553c9b6da8166e819a6aa90ad15288599b340f91d18f60b2061f402b9a4915"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7a946a8992941fea80ed4beae6bff74ffd7ee129a90b4dd5cf9c476a30e9708d"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4b51405e36e075193bc051315dbf29168d6141ae2500ba8cd80a522964e31434"}, + {file = "msgpack-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4c01941fd2ff87c2a934ee6055bda4ed353a7846b8d4f341c428109e9fcde8c"}, + {file = "msgpack-1.1.0-cp313-cp313-win32.whl", hash = 
"sha256:7c9a35ce2c2573bada929e0b7b3576de647b0defbd25f5139dcdaba0ae35a4cc"}, + {file = "msgpack-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:bce7d9e614a04d0883af0b3d4d501171fbfca038f12c77fa838d9f198147a23f"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c40ffa9a15d74e05ba1fe2681ea33b9caffd886675412612d93ab17b58ea2fec"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1ba6136e650898082d9d5a5217d5906d1e138024f836ff48691784bbe1adf96"}, + {file = "msgpack-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0856a2b7e8dcb874be44fea031d22e5b3a19121be92a1e098f46068a11b0870"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:471e27a5787a2e3f974ba023f9e265a8c7cfd373632247deb225617e3100a3c7"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:646afc8102935a388ffc3914b336d22d1c2d6209c773f3eb5dd4d6d3b6f8c1cb"}, + {file = "msgpack-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13599f8829cfbe0158f6456374e9eea9f44eee08076291771d8ae93eda56607f"}, + {file = "msgpack-1.1.0-cp38-cp38-win32.whl", hash = "sha256:8a84efb768fb968381e525eeeb3d92857e4985aacc39f3c47ffd00eb4509315b"}, + {file = "msgpack-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:879a7b7b0ad82481c52d3c7eb99bf6f0645dbdec5134a4bddbd16f3506947feb"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:53258eeb7a80fc46f62fd59c876957a2d0e15e6449a9e71842b6d24419d88ca1"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e7b853bbc44fb03fbdba34feb4bd414322180135e2cb5164f20ce1c9795ee48"}, + {file = "msgpack-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3e9b4936df53b970513eac1758f3882c88658a220b58dcc1e39606dccaaf01c"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46c34e99110762a76e3911fc923222472c9d681f1094096ac4102c18319e6468"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a706d1e74dd3dea05cb54580d9bd8b2880e9264856ce5068027eed09680aa74"}, + {file = "msgpack-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:534480ee5690ab3cbed89d4c8971a5c631b69a8c0883ecfea96c19118510c846"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8cf9e8c3a2153934a23ac160cc4cba0ec035f6867c8013cc6077a79823370346"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3180065ec2abbe13a4ad37688b61b99d7f9e012a535b930e0e683ad6bc30155b"}, + {file = "msgpack-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5a91481a3cc573ac8c0d9aace09345d989dc4a0202b7fcb312c88c26d4e71a8"}, + {file = "msgpack-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f80bc7d47f76089633763f952e67f8214cb7b3ee6bfa489b3cb6a84cfac114cd"}, + {file = "msgpack-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:4d1b7ff2d6146e16e8bd665ac726a89c74163ef8cd39fa8c1087d4e52d3a2325"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, ] [[package]] name = "mypy" -version = "1.10.1" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" -files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = 
"mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = 
"sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +groups = ["dev"] +files = [ + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = 
"mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -1428,6 +1497,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -1435,16 +1505,18 @@ files = [ [[package]] name = "mypy-zope" -version = "1.0.5" +version = "1.0.11" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" +groups = ["dev"] files = [ - {file = "mypy_zope-1.0.5.tar.gz", hash = "sha256:2440406d49c0e1199c1cd819c92a2c4957de65579c6abc8a081c927f4bdc8d49"}, + {file = "mypy_zope-1.0.11-py3-none-any.whl", hash = "sha256:4395d716b43ab89916edf6d0b5761655b4d4a43b2692fce806bbd733829977ee"}, + {file = "mypy_zope-1.0.11.tar.gz", hash = "sha256:1c95e49e9dcdf070a0858f067dac55e8e4e47519fdc15dfdab9b7eee273a0e01"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.11.0" +mypy = ">=1.0.0,<1.16.0" "zope.interface" = "*" "zope.schema" = "*" @@ -1457,6 +1529,7 @@ version = "1.3.0" description = "A network address manipulation library for Python" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "netaddr-1.3.0-py3-none-any.whl", hash = "sha256:c2c6a8ebe5554ce33b7d5b3a306b71bbb373e000bbbf2350dd5213cc56e3dbbe"}, {file = "netaddr-1.3.0.tar.gz", hash = "sha256:5c3c3d9895b551b763779ba7db7a03487dc1f8e3b385af819af341ae9ef6e48a"}, @@ -1471,6 +1544,8 @@ version = "2.4.0" description = "OpenTracing API for Python. 
See documentation at http://opentracing.io" optional = true python-versions = "*" +groups = ["main"] +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1480,13 +1555,14 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte [[package]] name = "packaging" -version = "24.1" +version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, ] [[package]] @@ -1495,6 +1571,7 @@ version = "0.9.0" description = "Parameterized testing with any Python test framework" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, @@ -1504,173 +1581,127 @@ files = [ dev = ["jinja2"] [[package]] -name = "pathspec" -version = "0.11.1" -description = "Utility library for gitignore style pattern matching of file paths." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, - {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, -] - -[[package]] name = "phonenumbers" -version = "8.13.42" +version = "9.0.2" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" +groups = ["main"] files = [ - {file = "phonenumbers-8.13.42-py2.py3-none-any.whl", hash = "sha256:18acc22ee03116d27b26e990f53806a1770a3e05f05e1620bc09ad187f889456"}, - {file = "phonenumbers-8.13.42.tar.gz", hash = "sha256:7137904f2db3b991701e853174ce8e1cb8f540b8bfdf27617540de04c0b7bed5"}, + {file = "phonenumbers-9.0.2-py2.py3-none-any.whl", hash = "sha256:dbcec6bdfdf3973f60b81dc0fcac3f7b1638f877ac42da4d7b46724ed413e2b9"}, + {file = "phonenumbers-9.0.2.tar.gz", hash = "sha256:f590ee2b729bdd9873ca2d52989466add14c9953b48805c0aeb408348d4d6224"}, ] [[package]] name = "pillow" -version = "10.4.0" +version = "11.2.1" description = "Python Imaging Library (Fork)" optional = false -python-versions = ">=3.8" -files = [ - {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, - {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, - {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, - {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, - {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, - {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, - {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, - {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, - {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, - {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, - {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, - {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, - {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, - {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, - {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, - {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, - {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, - {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, - {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, - {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, - {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, - {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, - {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, - {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, - {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, - {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, - {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pillow-11.2.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:d57a75d53922fc20c165016a20d9c44f73305e67c351bbc60d1adaf662e74047"}, + {file = "pillow-11.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:127bf6ac4a5b58b3d32fc8289656f77f80567d65660bc46f72c0d77e6600cc95"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ba4be812c7a40280629e55ae0b14a0aafa150dd6451297562e1764808bbe61"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8bd62331e5032bc396a93609982a9ab6b411c05078a52f5fe3cc59234a3abd1"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:562d11134c97a62fe3af29581f083033179f7ff435f78392565a1ad2d1c2c45c"}, + {file = "pillow-11.2.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:c97209e85b5be259994eb5b69ff50c5d20cca0f458ef9abd835e262d9d88b39d"}, + {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0c3e6d0f59171dfa2e25d7116217543310908dfa2770aa64b8f87605f8cacc97"}, + {file = "pillow-11.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc1c3bc53befb6096b84165956e886b1729634a799e9d6329a0c512ab651e579"}, + {file = "pillow-11.2.1-cp310-cp310-win32.whl", hash = "sha256:312c77b7f07ab2139924d2639860e084ec2a13e72af54d4f08ac843a5fc9c79d"}, + {file = 
"pillow-11.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:9bc7ae48b8057a611e5fe9f853baa88093b9a76303937449397899385da06fad"}, + {file = "pillow-11.2.1-cp310-cp310-win_arm64.whl", hash = "sha256:2728567e249cdd939f6cc3d1f049595c66e4187f3c34078cbc0a7d21c47482d2"}, + {file = "pillow-11.2.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35ca289f712ccfc699508c4658a1d14652e8033e9b69839edf83cbdd0ba39e70"}, + {file = "pillow-11.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e0409af9f829f87a2dfb7e259f78f317a5351f2045158be321fd135973fff7bf"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4e5c5edee874dce4f653dbe59db7c73a600119fbea8d31f53423586ee2aafd7"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b93a07e76d13bff9444f1a029e0af2964e654bfc2e2c2d46bfd080df5ad5f3d8"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e6def7eed9e7fa90fde255afaf08060dc4b343bbe524a8f69bdd2a2f0018f600"}, + {file = "pillow-11.2.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8f4f3724c068be008c08257207210c138d5f3731af6c155a81c2b09a9eb3a788"}, + {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a0a6709b47019dff32e678bc12c63008311b82b9327613f534e496dacaefb71e"}, + {file = "pillow-11.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f6b0c664ccb879109ee3ca702a9272d877f4fcd21e5eb63c26422fd6e415365e"}, + {file = "pillow-11.2.1-cp311-cp311-win32.whl", hash = "sha256:cc5d875d56e49f112b6def6813c4e3d3036d269c008bf8aef72cd08d20ca6df6"}, + {file = "pillow-11.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:0f5c7eda47bf8e3c8a283762cab94e496ba977a420868cb819159980b6709193"}, + {file = "pillow-11.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:4d375eb838755f2528ac8cbc926c3e31cc49ca4ad0cf79cff48b20e30634a4a7"}, + {file = "pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f"}, + {file = "pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d"}, + {file = "pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4"}, + {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443"}, + {file = "pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c"}, + {file = "pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3"}, + {file = "pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941"}, + {file = "pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb"}, + {file = 
"pillow-11.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fdec757fea0b793056419bca3e9932eb2b0ceec90ef4813ea4c1e072c389eb28"}, + {file = "pillow-11.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b0e130705d568e2f43a17bcbe74d90958e8a16263868a12c3e0d9c8162690830"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bdb5e09068332578214cadd9c05e3d64d99e0e87591be22a324bdbc18925be0"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d189ba1bebfbc0c0e529159631ec72bb9e9bc041f01ec6d3233d6d82eb823bc1"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:191955c55d8a712fab8934a42bfefbf99dd0b5875078240943f913bb66d46d9f"}, + {file = "pillow-11.2.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:ad275964d52e2243430472fc5d2c2334b4fc3ff9c16cb0a19254e25efa03a155"}, + {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:750f96efe0597382660d8b53e90dd1dd44568a8edb51cb7f9d5d918b80d4de14"}, + {file = "pillow-11.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fe15238d3798788d00716637b3d4e7bb6bde18b26e5d08335a96e88564a36b6b"}, + {file = "pillow-11.2.1-cp313-cp313-win32.whl", hash = "sha256:3fe735ced9a607fee4f481423a9c36701a39719252a9bb251679635f99d0f7d2"}, + {file = "pillow-11.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:74ee3d7ecb3f3c05459ba95eed5efa28d6092d751ce9bf20e3e253a4e497e691"}, + {file = "pillow-11.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:5119225c622403afb4b44bad4c1ca6c1f98eed79db8d3bc6e4e160fc6339d66c"}, + {file = "pillow-11.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8ce2e8411c7aaef53e6bb29fe98f28cd4fbd9a1d9be2eeea434331aac0536b22"}, + {file = "pillow-11.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:9ee66787e095127116d91dea2143db65c7bb1e232f617aa5957c0d9d2a3f23a7"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9622e3b6c1d8b551b6e6f21873bdcc55762b4b2126633014cea1803368a9aa16"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63b5dff3a68f371ea06025a1a6966c9a1e1ee452fc8020c2cd0ea41b83e9037b"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:31df6e2d3d8fc99f993fd253e97fae451a8db2e7207acf97859732273e108406"}, + {file = "pillow-11.2.1-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:062b7a42d672c45a70fa1f8b43d1d38ff76b63421cbbe7f88146b39e8a558d91"}, + {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4eb92eca2711ef8be42fd3f67533765d9fd043b8c80db204f16c8ea62ee1a751"}, + {file = "pillow-11.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f91ebf30830a48c825590aede79376cb40f110b387c17ee9bd59932c961044f9"}, + {file = "pillow-11.2.1-cp313-cp313t-win32.whl", hash = "sha256:e0b55f27f584ed623221cfe995c912c61606be8513bfa0e07d2c674b4516d9dd"}, + {file = "pillow-11.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:36d6b82164c39ce5482f649b437382c0fb2395eabc1e2b1702a6deb8ad647d6e"}, + {file = "pillow-11.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:225c832a13326e34f212d2072982bb1adb210e0cc0b153e688743018c94a2681"}, + {file = "pillow-11.2.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:7491cf8a79b8eb867d419648fff2f83cb0b3891c8b36da92cc7f1931d46108c8"}, + {file = "pillow-11.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b02d8f9cb83c52578a0b4beadba92e37d83a4ef11570a8688bbf43f4ca50909"}, + {file = 
"pillow-11.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:014ca0050c85003620526b0ac1ac53f56fc93af128f7546623cc8e31875ab928"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3692b68c87096ac6308296d96354eddd25f98740c9d2ab54e1549d6c8aea9d79"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:f781dcb0bc9929adc77bad571b8621ecb1e4cdef86e940fe2e5b5ee24fd33b35"}, + {file = "pillow-11.2.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:2b490402c96f907a166615e9a5afacf2519e28295f157ec3a2bb9bd57de638cb"}, + {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dd6b20b93b3ccc9c1b597999209e4bc5cf2853f9ee66e3fc9a400a78733ffc9a"}, + {file = "pillow-11.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4b835d89c08a6c2ee7781b8dd0a30209a8012b5f09c0a665b65b0eb3560b6f36"}, + {file = "pillow-11.2.1-cp39-cp39-win32.whl", hash = "sha256:b10428b3416d4f9c61f94b494681280be7686bda15898a3a9e08eb66a6d92d67"}, + {file = "pillow-11.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:6ebce70c3f486acf7591a3d73431fa504a4e18a9b97ff27f5f47b7368e4b9dd1"}, + {file = "pillow-11.2.1-cp39-cp39-win_arm64.whl", hash = "sha256:c27476257b2fdcd7872d54cfd119b3a9ce4610fb85c8e32b70b42e3680a29a1e"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:9b7b0d4fd2635f54ad82785d56bc0d94f147096493a79985d0ab57aedd563156"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa442755e31c64037aa7c1cb186e0b369f8416c567381852c63444dd666fb772"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0d3348c95b766f54b76116d53d4cb171b52992a1027e7ca50c81b43b9d9e363"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85d27ea4c889342f7e35f6d56e7e1cb345632ad592e8c51b693d7b7556043ce0"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bf2c33d6791c598142f00c9c4c7d47f6476731c31081331664eb26d6ab583e01"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e616e7154c37669fc1dfc14584f11e284e05d1c650e1c0f972f281c4ccc53193"}, + {file = "pillow-11.2.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:39ad2e0f424394e3aebc40168845fee52df1394a4673a6ee512d840d14ab3013"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:80f1df8dbe9572b4b7abdfa17eb5d78dd620b1d55d9e25f834efdbee872d3aed"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea926cfbc3957090becbcbbb65ad177161a2ff2ad578b5a6ec9bb1e1cd78753c"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:738db0e0941ca0376804d4de6a782c005245264edaa253ffce24e5a15cbdc7bd"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9db98ab6565c69082ec9b0d4e40dd9f6181dab0dd236d26f7a50b8b9bfbd5076"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:036e53f4170e270ddb8797d4c590e6dd14d28e15c7da375c18978045f7e6c37b"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:14f73f7c291279bd65fda51ee87affd7c1e097709f7fdd0188957a16c264601f"}, + {file = "pillow-11.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:208653868d5c9ecc2b327f9b9ef34e0e42a4cdd172c2988fd81d62d2bc9bc044"}, + {file = 
"pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] +test-arrow = ["pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"] +typing = ["typing-extensions ; python_version < \"3.10\""] xmp = ["defusedxml"] [[package]] -name = "pkginfo" -version = "1.9.6" -description = "Query metadata from sdists / bdists / installed packages." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkginfo-1.9.6-py3-none-any.whl", hash = "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546"}, - {file = "pkginfo-1.9.6.tar.gz", hash = "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"}, -] - -[package.extras] -testing = ["pytest", "pytest-cov"] - -[[package]] -name = "pkgutil-resolve-name" -version = "1.3.10" -description = "Resolve a name to an object." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] - -[[package]] -name = "platformdirs" -version = "3.1.1" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"}, - {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"}, -] - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - -[[package]] name = "prometheus-client" -version = "0.20.0" +version = "0.21.0" description = "Python client for the Prometheus monitoring system." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "prometheus_client-0.20.0-py3-none-any.whl", hash = "sha256:cde524a85bce83ca359cc837f28b8c0db5cac7aa653a588fd7e84ba061c329e7"}, - {file = "prometheus_client-0.20.0.tar.gz", hash = "sha256:287629d00b147a32dcb2be0b9df905da599b2d82f80377083ec8463309a4bb89"}, + {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, + {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, ] [package.extras] @@ -1678,24 +1709,23 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.9" +version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"all\" or extra == \"postgres\"" files = [ - {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, - {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, - {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, - {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, - {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, - {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, - {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, - {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, - {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, + {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, + {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, + {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, + {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, + {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, + {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", 
hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, + {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, + {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, + {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, ] [[package]] @@ -1704,6 +1734,8 @@ version = "2.9.0" description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=master" optional = true python-versions = "*" +groups = ["main"] +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1718,6 +1750,8 @@ version = "1.1" description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" +groups = ["main"] +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -1727,24 +1761,26 @@ psycopg2 = "*" [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] name = "pyasn1-modules" -version = "0.4.0" +version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] [package.dependencies] @@ -1756,6 +1792,7 @@ version = "2.21" description = "C parser in Python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main", "dev"] files = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, @@ -1763,122 +1800,133 @@ files = [ [[package]] name = "pydantic" -version = "2.8.2" +version = "2.11.4" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main", "dev"] files = [ - {file = 
"pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"}, + {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" -typing-extensions = [ - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, -] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash 
= "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = 
"pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = 
"pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", 
hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, ] [package.dependencies] @@ -1886,13 +1934,14 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pygithub" -version = "2.3.0" +version = "2.6.1" description = "Use the full Github API v3" optional = false 
-python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "PyGithub-2.3.0-py3-none-any.whl", hash = "sha256:65b499728be3ce7b0cd2cd760da3b32f0f4d7bc55e5e0677617f90f6564e793e"}, - {file = "PyGithub-2.3.0.tar.gz", hash = "sha256:0148d7347a1cdeed99af905077010aef81a4dad988b0ba51d4108bf66b443f7e"}, + {file = "PyGithub-2.6.1-py3-none-any.whl", hash = "sha256:6f2fa6d076ccae475f9fc392cc6cdbd54db985d4f69b8833a28397de75ed6ca3"}, + {file = "pygithub-2.6.1.tar.gz", hash = "sha256:b5c035392991cca63959e9453286b41b54d83bf2de2daa7d7ff7e4312cebf3bf"}, ] [package.dependencies] @@ -1909,22 +1958,25 @@ version = "2.15.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, ] [package.extras] -plugins = ["importlib-metadata"] +plugins = ["importlib-metadata ; python_version < \"3.8\""] [[package]] name = "pyicu" -version = "2.13.1" +version = "2.14" description = "Python extension wrapping the ICU C++ API" optional = true python-versions = "*" +groups = ["main"] +markers = "extra == \"all\" or extra == \"user-search\"" files = [ - {file = "PyICU-2.13.1.tar.gz", hash = "sha256:d4919085eaa07da12bade8ee721e7bbf7ade0151ca0f82946a26c8f4b98cdceb"}, + {file = "PyICU-2.14.tar.gz", hash = "sha256:acc7eb92bd5c554ed577249c6978450a4feda0aa6f01470152b3a7b382a02132"}, ] [[package]] @@ -1933,6 +1985,7 @@ version = "2.6.0" description = "JSON Web Token implementation in Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "PyJWT-2.6.0-py3-none-any.whl", hash = "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"}, {file = "PyJWT-2.6.0.tar.gz", hash = "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd"}, @@ -1953,6 +2006,7 @@ version = "0.13.0" description = "Macaroon library for Python" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"}, {file = "pymacaroons-0.13.0.tar.gz", hash = "sha256:1e6bba42a5f66c245adf38a5a4006a99dcc06a0703786ea636098667d42903b8"}, @@ -1968,6 +2022,8 @@ version = "1.0.1" description = "A development tool to measure, monitor and analyze the memory behavior of Python objects." 
optional = true python-versions = ">=3.6" +groups = ["main"] +markers = "extra == \"all\" or extra == \"cache-memory\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -1979,6 +2035,7 @@ version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, @@ -2001,42 +2058,45 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "24.2.1" +version = "25.1.0" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"}, - {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"}, + {file = "pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab"}, + {file = "pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b"}, ] [package.dependencies] -cryptography = ">=41.0.5,<44" +cryptography = ">=41.0.5,<46" +typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""} [package.extras] -docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"] +docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx_rtd_theme"] test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"] [[package]] name = "pysaml2" -version = "7.3.1" +version = "7.5.0" description = "Python implementation of SAML Version 2 Standard" optional = true -python-versions = ">=3.6.2,<4.0.0" +python-versions = ">=3.9,<4.0" +groups = ["main"] +markers = "extra == \"all\" or extra == \"saml2\"" files = [ - {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"}, - {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"}, + {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, + {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, ] [package.dependencies] cryptography = ">=3.1" defusedxml = "*" -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} pyopenssl = "*" python-dateutil = "*" pytz = "*" requests = ">=2,<3" -xmlschema = ">=1.2.1" +xmlschema = ">=2,<3" [package.extras] s2repoze = ["paste", "repoze.who", "zope.interface"] @@ -2047,6 +2107,8 @@ version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = 
"sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2057,24 +2119,24 @@ six = ">=1.5" [[package]] name = "python-multipart" -version = "0.0.9" +version = "0.0.20" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, - {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, + {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, + {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, ] -[package.extras] -dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] - [[package]] name = "pytz" version = "2022.7.1" description = "World timezone definitions, modern and historical" optional = true python-versions = "*" +groups = ["main"] +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2086,6 +2148,8 @@ version = "0.2.0" description = "" optional = false python-versions = "*" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"win32\"" files = [ {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"}, {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, @@ -2093,62 +2157,65 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = 
"PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = 
"PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] @@ -2157,6 +2224,7 @@ version = "37.3" description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "readme_renderer-37.3-py3-none-any.whl", hash = "sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"}, {file = "readme_renderer-37.3.tar.gz", hash = "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273"}, @@ -2176,6 +2244,7 @@ version = "0.29.1" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "referencing-0.29.1-py3-none-any.whl", hash = "sha256:d3c8f323ee1480095da44d55917cfb8278d73d6b4d5f677e3e40eb21314ac67f"}, {file = "referencing-0.29.1.tar.gz", hash = "sha256:90cb53782d550ba28d2166ef3f55731f38397def8832baac5d45235f1995e35e"}, @@ -2191,6 +2260,7 @@ version = "2.32.2" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, @@ -2212,6 +2282,7 @@ version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -2226,6 +2297,7 @@ version = "2.0.0" description = "Validating URI References per RFC 3986" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, @@ -2240,6 +2312,7 @@ version = "13.3.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.7.0" +groups = ["dev"] files = [ {file = "rich-13.3.2-py3-none-any.whl", hash = "sha256:a104f37270bf677148d8acb07d33be1569eeee87e2d1beb286a4e9113caf6f2f"}, {file = "rich-13.3.2.tar.gz", hash = "sha256:91954fe80cfb7985727a467ca98a7618e5dd15178cc2da10f553b36a93859001"}, @@ -2248,7 +2321,6 @@ files = [ [package.dependencies] markdown-it-py = ">=2.2.0,<3.0.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -2259,6 +2331,7 @@ version = "0.8.10" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "rpds_py-0.8.10-cp310-cp310-macosx_10_7_x86_64.whl", hash = 
"sha256:93d06cccae15b3836247319eee7b6f1fdcd6c10dabb4e6d350d27bd0bdca2711"}, {file = "rpds_py-0.8.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3816a890a6a9e9f1de250afa12ca71c9a7a62f2b715a29af6aaee3aea112c181"}, @@ -2361,29 +2434,30 @@ files = [ [[package]] name = "ruff" -version = "0.5.5" +version = "0.11.11" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" -files = [ - {file = "ruff-0.5.5-py3-none-linux_armv6l.whl", hash = "sha256:605d589ec35d1da9213a9d4d7e7a9c761d90bba78fc8790d1c5e65026c1b9eaf"}, - {file = "ruff-0.5.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00817603822a3e42b80f7c3298c8269e09f889ee94640cd1fc7f9329788d7bf8"}, - {file = "ruff-0.5.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:187a60f555e9f865a2ff2c6984b9afeffa7158ba6e1eab56cb830404c942b0f3"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe26fc46fa8c6e0ae3f47ddccfbb136253c831c3289bba044befe68f467bfb16"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad25dd9c5faac95c8e9efb13e15803cd8bbf7f4600645a60ffe17c73f60779b"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f70737c157d7edf749bcb952d13854e8f745cec695a01bdc6e29c29c288fc36e"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:cfd7de17cef6ab559e9f5ab859f0d3296393bc78f69030967ca4d87a541b97a0"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09b43e02f76ac0145f86a08e045e2ea452066f7ba064fd6b0cdccb486f7c3e7"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0b856cb19c60cd40198be5d8d4b556228e3dcd545b4f423d1ad812bfdca5884"}, - {file = "ruff-0.5.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3687d002f911e8a5faf977e619a034d159a8373514a587249cc00f211c67a091"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ac9dc814e510436e30d0ba535f435a7f3dc97f895f844f5b3f347ec8c228a523"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:af9bdf6c389b5add40d89b201425b531e0a5cceb3cfdcc69f04d3d531c6be74f"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d40a8533ed545390ef8315b8e25c4bb85739b90bd0f3fe1280a29ae364cc55d8"}, - {file = "ruff-0.5.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cab904683bf9e2ecbbe9ff235bfe056f0eba754d0168ad5407832928d579e7ab"}, - {file = "ruff-0.5.5-py3-none-win32.whl", hash = "sha256:696f18463b47a94575db635ebb4c178188645636f05e934fdf361b74edf1bb2d"}, - {file = "ruff-0.5.5-py3-none-win_amd64.whl", hash = "sha256:50f36d77f52d4c9c2f1361ccbfbd09099a1b2ea5d2b2222c586ab08885cf3445"}, - {file = "ruff-0.5.5-py3-none-win_arm64.whl", hash = "sha256:3191317d967af701f1b73a31ed5788795936e423b7acce82a2b63e26eb3e89d6"}, - {file = "ruff-0.5.5.tar.gz", hash = "sha256:cc5516bdb4858d972fbc31d246bdb390eab8df1a26e2353be2dbc0c2d7f5421a"}, +groups = ["dev"] +files = [ + {file = "ruff-0.11.11-py3-none-linux_armv6l.whl", hash = "sha256:9924e5ae54125ed8958a4f7de320dab7380f6e9fa3195e3dc3b137c6842a0092"}, + {file = "ruff-0.11.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8a93276393d91e952f790148eb226658dd275cddfde96c6ca304873f11d2ae4"}, + {file = "ruff-0.11.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d6e333dbe2e6ae84cdedefa943dfd6434753ad321764fd937eef9d6b62022bcd"}, + {file = 
"ruff-0.11.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7885d9a5e4c77b24e8c88aba8c80be9255fa22ab326019dac2356cff42089fc6"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b5ab797fcc09121ed82e9b12b6f27e34859e4227080a42d090881be888755d4"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e231ff3132c1119ece836487a02785f099a43992b95c2f62847d29bace3c75ac"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a97c9babe1d4081037a90289986925726b802d180cca784ac8da2bbbc335f709"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8c4ddcbe8a19f59f57fd814b8b117d4fcea9bee7c0492e6cf5fdc22cfa563c8"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6224076c344a7694c6fbbb70d4f2a7b730f6d47d2a9dc1e7f9d9bb583faf390b"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:882821fcdf7ae8db7a951df1903d9cb032bbe838852e5fc3c2b6c3ab54e39875"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:dcec2d50756463d9df075a26a85a6affbc1b0148873da3997286caf1ce03cae1"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:99c28505ecbaeb6594701a74e395b187ee083ee26478c1a795d35084d53ebd81"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9263f9e5aa4ff1dec765e99810f1cc53f0c868c5329b69f13845f699fe74f639"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:64ac6f885e3ecb2fdbb71de2701d4e34526651f1e8503af8fb30d4915a3fe345"}, + {file = "ruff-0.11.11-py3-none-win32.whl", hash = "sha256:1adcb9a18802268aaa891ffb67b1c94cd70578f126637118e8099b8e4adcf112"}, + {file = "ruff-0.11.11-py3-none-win_amd64.whl", hash = "sha256:748b4bb245f11e91a04a4ff0f96e386711df0a30412b9fe0c74d5bdc0e4a531f"}, + {file = "ruff-0.11.11-py3-none-win_arm64.whl", hash = "sha256:6c51f136c0364ab1b774767aa8b86331bd8e9d414e2d107db7a2189f35ea1f7b"}, + {file = "ruff-0.11.11.tar.gz", hash = "sha256:7774173cc7c1980e6bf67569ebb7085989a78a103922fb83ef3dfe230cd0687d"}, ] [[package]] @@ -2392,6 +2466,8 @@ version = "3.3.3" description = "Python bindings to FreeDesktop.org Secret Service API" optional = false python-versions = ">=3.6" +groups = ["dev"] +markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" files = [ {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, @@ -2407,24 +2483,27 @@ version = "2.10.0" description = "A library implementing the 'SemVer' scheme." 
optional = false python-versions = ">=2.7" +groups = ["main"] files = [ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, ] [package.extras] -dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] +dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1) ; python_version == \"3.4\"", "coverage", "flake8", "nose2", "readme-renderer (<25.0) ; python_version == \"3.4\"", "tox", "wheel", "zest.releaser[recommended]"] doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.10.0" +version = "2.22.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" +groups = ["main"] +markers = "extra == \"all\" or extra == \"sentry\"" files = [ - {file = "sentry_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:87b3d413c87d8e7f816cc9334bff255a83d8b577db2b22042651c30c19c09190"}, - {file = "sentry_sdk-2.10.0.tar.gz", hash = "sha256:545fcc6e36c335faa6d6cda84669b6e17025f31efbf3b2211ec14efe008b75d1"}, + {file = "sentry_sdk-2.22.0-py2.py3-none-any.whl", hash = "sha256:3d791d631a6c97aad4da7074081a57073126c69487560c6f8bffcf586461de66"}, + {file = "sentry_sdk-2.22.0.tar.gz", hash = "sha256:b4bf43bb38f547c84b2eadcefbe389b36ef75f3f38253d7a74d6b928c07ae944"}, ] [package.dependencies] @@ -2447,15 +2526,19 @@ falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"] +http2 = ["httpcore[http2] (==1.*)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] -huggingface-hub = ["huggingface-hub (>=0.22)"] +huggingface-hub = ["huggingface_hub (>=0.22)"] langchain = ["langchain (>=0.0.210)"] +launchdarkly = ["launchdarkly-server-sdk (>=9.8.0)"] +litestar = ["litestar (>=2.0.0)"] loguru = ["loguru (>=0.5)"] openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] +openfeature = ["openfeature-sdk (>=0.7.1)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] -opentelemetry-experimental = ["opentelemetry-instrumentation-aio-pika (==0.46b0)", "opentelemetry-instrumentation-aiohttp-client (==0.46b0)", "opentelemetry-instrumentation-aiopg (==0.46b0)", "opentelemetry-instrumentation-asgi (==0.46b0)", "opentelemetry-instrumentation-asyncio (==0.46b0)", "opentelemetry-instrumentation-asyncpg (==0.46b0)", "opentelemetry-instrumentation-aws-lambda (==0.46b0)", "opentelemetry-instrumentation-boto (==0.46b0)", "opentelemetry-instrumentation-boto3sqs (==0.46b0)", "opentelemetry-instrumentation-botocore (==0.46b0)", "opentelemetry-instrumentation-cassandra (==0.46b0)", "opentelemetry-instrumentation-celery (==0.46b0)", "opentelemetry-instrumentation-confluent-kafka (==0.46b0)", "opentelemetry-instrumentation-dbapi (==0.46b0)", "opentelemetry-instrumentation-django (==0.46b0)", "opentelemetry-instrumentation-elasticsearch (==0.46b0)", "opentelemetry-instrumentation-falcon (==0.46b0)", "opentelemetry-instrumentation-fastapi (==0.46b0)", "opentelemetry-instrumentation-flask (==0.46b0)", "opentelemetry-instrumentation-grpc (==0.46b0)", "opentelemetry-instrumentation-httpx (==0.46b0)", "opentelemetry-instrumentation-jinja2 (==0.46b0)", "opentelemetry-instrumentation-kafka-python (==0.46b0)", "opentelemetry-instrumentation-logging 
(==0.46b0)", "opentelemetry-instrumentation-mysql (==0.46b0)", "opentelemetry-instrumentation-mysqlclient (==0.46b0)", "opentelemetry-instrumentation-pika (==0.46b0)", "opentelemetry-instrumentation-psycopg (==0.46b0)", "opentelemetry-instrumentation-psycopg2 (==0.46b0)", "opentelemetry-instrumentation-pymemcache (==0.46b0)", "opentelemetry-instrumentation-pymongo (==0.46b0)", "opentelemetry-instrumentation-pymysql (==0.46b0)", "opentelemetry-instrumentation-pyramid (==0.46b0)", "opentelemetry-instrumentation-redis (==0.46b0)", "opentelemetry-instrumentation-remoulade (==0.46b0)", "opentelemetry-instrumentation-requests (==0.46b0)", "opentelemetry-instrumentation-sklearn (==0.46b0)", "opentelemetry-instrumentation-sqlalchemy (==0.46b0)", "opentelemetry-instrumentation-sqlite3 (==0.46b0)", "opentelemetry-instrumentation-starlette (==0.46b0)", "opentelemetry-instrumentation-system-metrics (==0.46b0)", "opentelemetry-instrumentation-threading (==0.46b0)", "opentelemetry-instrumentation-tornado (==0.46b0)", "opentelemetry-instrumentation-tortoiseorm (==0.46b0)", "opentelemetry-instrumentation-urllib (==0.46b0)", "opentelemetry-instrumentation-urllib3 (==0.46b0)", "opentelemetry-instrumentation-wsgi (==0.46b0)"] -pure-eval = ["asttokens", "executing", "pure-eval"] +opentelemetry-experimental = ["opentelemetry-distro"] +pure-eval = ["asttokens", "executing", "pure_eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] @@ -2464,17 +2547,20 @@ sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] starlette = ["starlette (>=0.19.1)"] starlite = ["starlite (>=1.48)"] +statsig = ["statsig (>=0.55.3)"] tornado = ["tornado (>=6)"] +unleash = ["UnleashClient (>=6.0.1)"] [[package]] name = "service-identity" -version = "24.1.0" +version = "24.2.0" description = "Service identity verification for pyOpenSSL & cryptography." 
 optional = false
 python-versions = ">=3.8"
+groups = ["main"]
 files = [
-    {file = "service_identity-24.1.0-py3-none-any.whl", hash = "sha256:a28caf8130c8a5c1c7a6f5293faaf239bbfb7751e4862436920ee6f2616f568a"},
-    {file = "service_identity-24.1.0.tar.gz", hash = "sha256:6829c9d62fb832c2e1c435629b0a8c476e1929881f28bee4d20bc24161009221"},
+    {file = "service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85"},
+    {file = "service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09"},
 ]
 
 [package.dependencies]
@@ -2484,7 +2570,7 @@ pyasn1 = "*"
 pyasn1-modules = "*"
 
 [package.extras]
-dev = ["pyopenssl", "service-identity[idna,mypy,tests]"]
+dev = ["coverage[toml] (>=5.0.2)", "idna", "mypy", "pyopenssl", "pytest", "types-pyopenssl"]
 docs = ["furo", "myst-parser", "pyopenssl", "sphinx", "sphinx-notfound-page"]
 idna = ["idna"]
 mypy = ["idna", "mypy", "types-pyopenssl"]
@@ -2492,35 +2578,40 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"]
 
 [[package]]
 name = "setuptools"
-version = "67.6.0"
+version = "78.1.1"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.9"
+groups = ["main", "dev"]
 files = [
-    {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"},
-    {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"},
+    {file = "setuptools-78.1.1-py3-none-any.whl", hash = "sha256:c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561"},
+    {file = "setuptools-78.1.1.tar.gz", hash = "sha256:fcc17fd9cd898242f6b4adfaca46137a9edef687f43e6f78469692a5e70d851d"},
 ]
 
 [package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
-testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
+core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
 
 [[package]]
 name = "setuptools-rust"
-version = "1.8.1"
+version = "1.10.2"
 description = "Setuptools Rust extension plugin"
 optional = false
 python-versions = ">=3.8"
+groups = ["main"]
 files = [
-    {file = "setuptools-rust-1.8.1.tar.gz", hash = "sha256:94b1dd5d5308b3138d5b933c3a2b55e6d6927d1a22632e509fcea9ddd0f7e486"},
-    {file = "setuptools_rust-1.8.1-py3-none-any.whl", hash = "sha256:b5324493949ccd6aa0c03890c5f6b5f02de4512e3ac1697d02e9a6c02b18aa8e"},
+    {file = "setuptools_rust-1.10.2-py3-none-any.whl", hash = "sha256:4b39c435ae9670315d522ed08fa0e8cb29f2a6048033966b6be2571a90ce4f1c"},
+    {file = "setuptools_rust-1.10.2.tar.gz", hash = "sha256:5d73e7eee5f87a6417285b617c97088a7c20d1a70fcea60e3bdc94ff567c29dc"},
 ]
 
 [package.dependencies]
 semantic-version = ">=2.8.2,<3"
 setuptools = ">=62.4"
-tomli = {version = ">=1.2.1", markers = "python_version < \"3.11\""}
 
 [[package]]
 name = "signedjson"
@@ -2528,6 +2619,7 @@ version = "1.1.4"
 description = "Sign JSON with Ed25519 signatures"
 optional = false
 python-versions = "*"
+groups = ["main"]
 files = [
     {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
     {file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},
@@ -2547,6 +2639,7 @@ version = "1.16.0"
 description = "Python 2 and 3 compatibility utilities"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+groups = ["main"]
 files = [
     {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
     {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
@@ -2558,6 +2651,7 @@ version = "5.0.0"
 description = "A pure Python implementation of a sliding window memory map manager"
 optional = false
 python-versions = ">=3.6"
+groups = ["dev"]
 files = [
     {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"},
     {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"},
@@ -2569,6 +2663,7 @@ version = "2.4.0"
 description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set"
 optional = false
 python-versions = "*"
+groups = ["main"]
 files = [
     {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"},
     {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
@@ -2580,6 +2675,8 @@ version = "235"
 description = "Python interface for libsystemd"
 optional = true
 python-versions = "*"
+groups = ["main"]
+markers = "extra == \"systemd\""
 files = [
     {file = "systemd-python-235.tar.gz", hash = "sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a"},
 ]
@@ -2590,6 +2687,8 @@ version = "1.0.2"
 description = "Tornado IOLoop Backed Concurrent Futures"
 optional = true
 python-versions = "*"
+groups = ["main"]
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"},
     {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"},
@@ -2604,6 +2703,8 @@ version = "0.16.0"
 description = "Python bindings for the Apache Thrift RPC system"
 optional = true
 python-versions = "*"
+groups = ["main"]
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
     {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"},
 ]
@@ -2618,44 +2719,79 @@ twisted = ["twisted"]
 
 [[package]]
 name = "tomli"
-version = "2.0.1"
+version = "2.2.1"
 description = "A lil' TOML parser"
 optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
-    {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+python-versions = ">=3.8"
+groups = ["main", "dev"]
+files = [
+    {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
+    {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
+    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"},
+    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"},
+    {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"},
+    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"},
+    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"},
+    {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"},
+    {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"},
+    {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"},
+    {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"},
+    {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"},
+    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"},
+    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"},
+    {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"},
+    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"},
+    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"},
+    {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"},
+    {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"},
+    {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"},
+    {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"},
+    {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"},
+    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"},
+    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"},
+    {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"},
+    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"},
+    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"},
+    {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"},
+    {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"},
+    {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"},
+    {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"},
+    {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"},
 ]
 
 [[package]]
 name = "tornado"
-version = "6.4.1"
+version = "6.5"
 description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
 optional = true
-python-versions = ">=3.8"
+python-versions = ">=3.9"
+groups = ["main"]
+markers = "extra == \"all\" or extra == \"opentracing\""
 files = [
-    {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"},
-    {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"},
-    {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"},
-    {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"},
-    {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"},
-    {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"},
-    {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"},
-    {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"},
-    {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"},
-    {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"},
-    {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"},
+    {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"},
+    {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"},
+    {file = "tornado-6.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c625b9d03f1fb4d64149c47d0135227f0434ebb803e2008040eb92906b0105a"},
+    {file = "tornado-6.5-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a0d8d2309faf015903080fb5bdd969ecf9aa5ff893290845cf3fd5b2dd101bc"},
+    {file = "tornado-6.5-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03576ab51e9b1677e4cdaae620d6700d9823568b7939277e4690fe4085886c55"},
+    {file = "tornado-6.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ab75fe43d0e1b3a5e3ceddb2a611cb40090dd116a84fc216a07a298d9e000471"},
+    {file = "tornado-6.5-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:119c03f440a832128820e87add8a175d211b7f36e7ee161c631780877c28f4fb"},
+    {file = "tornado-6.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:231f2193bb4c28db2bdee9e57bc6ca0cd491f345cd307c57d79613b058e807e0"},
+    {file = "tornado-6.5-cp39-abi3-win32.whl", hash = "sha256:fd20c816e31be1bbff1f7681f970bbbd0bb241c364220140228ba24242bcdc59"},
+    {file = "tornado-6.5-cp39-abi3-win_amd64.whl", hash = "sha256:007f036f7b661e899bd9ef3fa5f87eb2cb4d1b2e7d67368e778e140a2f101a7a"},
+    {file = "tornado-6.5-cp39-abi3-win_arm64.whl", hash = "sha256:542e380658dcec911215c4820654662810c06ad872eefe10def6a5e9b20e9633"},
+    {file = "tornado-6.5.tar.gz", hash = "sha256:c70c0a26d5b2d85440e4debd14a8d0b463a0cf35d92d3af05f5f1ffa8675c826"},
 ]
 
 [[package]]
 name = "towncrier"
-version = "24.7.1"
+version = "24.8.0"
 description = "Building newsfiles for your project."
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
 files = [
-    {file = "towncrier-24.7.1-py3-none-any.whl", hash = "sha256:685e2a94335b5dc47537b4d3b449a25b18571ea85b07dcf6e8df31ba40f692dd"},
-    {file = "towncrier-24.7.1.tar.gz", hash = "sha256:57a057faedabcadf1a62f6f9bad726ae566c1f31a411338ddb8316993f583b3d"},
+    {file = "towncrier-24.8.0-py3-none-any.whl", hash = "sha256:9343209592b839209cdf28c339ba45792fbfe9775b5f9c177462fd693e127d8d"},
+    {file = "towncrier-24.8.0.tar.gz", hash = "sha256:013423ee7eed102b2f393c287d22d95f66f1a3ea10a4baa82d298001a7f18af3"},
 ]
 
 [package.dependencies]
@@ -2670,13 +2806,14 @@ dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"]
 
 [[package]]
 name = "treq"
-version = "23.11.0"
+version = "24.9.1"
 description = "High-level Twisted HTTP Client API"
 optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
+groups = ["main"]
 files = [
-    {file = "treq-23.11.0-py3-none-any.whl", hash = "sha256:f494c2218d61cab2cabbee37cd6606d3eea9d16cf14190323095c95d22c467e9"},
-    {file = "treq-23.11.0.tar.gz", hash = "sha256:0914ff929fd1632ce16797235260f8bc19d20ff7c459c1deabd65b8c68cbeac5"},
+    {file = "treq-24.9.1-py3-none-any.whl", hash = "sha256:eee4756fd9a857c77f180fd5202b52c518f2d3e2826dce28b89066c03bfc45d0"},
+    {file = "treq-24.9.1.tar.gz", hash = "sha256:15da7fc404f3e4ed59d0abe5f8eef4966fabbe618039a2a23bc7c15305cefea8"},
 ]
 
 [package.dependencies]
@@ -2685,6 +2822,7 @@ hyperlink = ">=21.0.0"
 incremental = "*"
 requests = ">=2.1.0"
 Twisted = {version = ">=22.10.0", extras = ["tls"]}
+typing-extensions = ">=3.10.0"
 
 [package.extras]
 dev = ["httpbin (==0.7.0)", "pep8", "pyflakes", "werkzeug (==2.0.3)"]
@@ -2692,19 +2830,21 @@ docs = ["sphinx (<7.0.0)"]
 
 [[package]]
 name = "twine"
-version = "5.1.1"
+version = "6.1.0"
 description = "Collection of utilities for publishing packages on PyPI"
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
 files = [
-    {file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"},
-    {file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"},
+    {file = "twine-6.1.0-py3-none-any.whl", hash = "sha256:a47f973caf122930bf0fbbf17f80b83bc1602c9ce393c7845f289a3001dc5384"},
+    {file = "twine-6.1.0.tar.gz", hash = "sha256:be324f6272eff91d07ee93f251edf232fc647935dd585ac003539b42404a8dbd"},
 ]
 
 [package.dependencies]
-importlib-metadata = ">=3.6"
-keyring = ">=15.1"
-pkginfo = ">=1.8.1,<1.11"
+id = "*"
+importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""}
+keyring = {version = ">=15.1", markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\""}
+packaging = ">=24.0"
 readme-renderer = ">=35.0"
 requests = ">=2.20"
 requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
@@ -2712,15 +2852,19 @@ rfc3986 = ">=1.4.0"
 rich = ">=12.0.0"
 urllib3 = ">=1.26.0"
 
+[package.extras]
+keyring = ["keyring (>=15.1)"]
+
 [[package]]
 name = "twisted"
-version = "24.7.0rc1"
+version = "24.7.0"
 description = "An asynchronous networking framework written in Python"
 optional = false
 python-versions = ">=3.8.0"
+groups = ["main"]
 files = [
-    {file = "twisted-24.7.0rc1-py3-none-any.whl", hash = "sha256:f37d6656fe4e2871fab29d8952ae90bd6ca8b48a9e4dfa1b348f4cd62e6ba0bb"},
-    {file = "twisted-24.7.0rc1.tar.gz", hash = "sha256:bbc4a2193ca34cfa32f626300746698a6d70fcd77d9c0b79a664c347e39634fc"},
+    {file = "twisted-24.7.0-py3-none-any.whl", hash = "sha256:734832ef98108136e222b5230075b1079dad8a3fc5637319615619a7725b0c81"},
+    {file = "twisted-24.7.0.tar.gz", hash = "sha256:5a60147f044187a127ec7da96d170d49bcce50c6fd36f594e60f4587eff4d394"},
 ]
 
 [package.dependencies]
@@ -2736,29 +2880,31 @@ typing-extensions = ">=4.2.0"
 zope-interface = ">=5"
 
 [package.extras]
-all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
+all-non-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)"]
 dev = ["coverage (>=7.5,<8.0)", "cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "python-subunit (>=1.4,<2.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)"]
 dev-release = ["pydoctor (>=23.9.0,<23.10.0)", "pydoctor (>=23.9.0,<23.10.0)", "sphinx (>=6,<7)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "towncrier (>=23.6,<24.0)"]
-gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
+gtk-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pygobject", "pygobject", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
 http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
-macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
-mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"]
-osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
-serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
+macos-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
+mypy = ["appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "coverage (>=7.5,<8.0)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "idna (>=2.4)", "mypy (>=1.8,<2.0)", "mypy-zope (>=1.0.3,<1.1.0)", "priority (>=1.1.0,<2.0)", "pydoctor (>=23.9.0,<23.10.0)", "pyflakes (>=2.2,<3.0)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "sphinx (>=6,<7)", "sphinx-rtd-theme (>=1.3,<2.0)", "towncrier (>=23.6,<24.0)", "twistedchecker (>=0.7,<1.0)", "types-pyopenssl", "types-setuptools"]
+osx-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyobjc-core", "pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "pyobjc-framework-cocoa", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)"]
+serial = ["pyserial (>=3.0)", "pywin32 (!=226) ; platform_system == \"Windows\""]
 test = ["cython-test-exception-raiser (>=1.0.2,<2)", "hypothesis (>=6.56)", "pyhamcrest (>=2)"]
 tls = ["idna (>=2.4)", "pyopenssl (>=21.0.0)", "service-identity (>=18.1.0)"]
-windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"]
+windows-platform = ["appdirs (>=1.4.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.1.3)", "bcrypt (>=3.1.3)", "cryptography (>=3.3)", "cryptography (>=3.3)", "cython-test-exception-raiser (>=1.0.2,<2)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "h2 (>=3.0,<5.0)", "hypothesis (>=6.56)", "hypothesis (>=6.56)", "idna (>=2.4)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "priority (>=1.1.0,<2.0)", "pyhamcrest (>=2)", "pyhamcrest (>=2)", "pyopenssl (>=21.0.0)", "pyopenssl (>=21.0.0)", "pyserial (>=3.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "pywin32 (!=226) ; platform_system == \"Windows\"", "pywin32 (!=226) ; platform_system == \"Windows\"", "service-identity (>=18.1.0)", "service-identity (>=18.1.0)", "twisted-iocpsupport (>=1.0.2)", "twisted-iocpsupport (>=1.0.2)"]
 
 [[package]]
 name = "txredisapi"
-version = "1.4.10"
+version = "1.4.11"
 description = "non-blocking redis client for python"
 optional = true
 python-versions = "*"
+groups = ["main"]
+markers = "extra == \"all\" or extra == \"redis\""
 files = [
-    {file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"},
-    {file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"},
+    {file = "txredisapi-1.4.10-py3-none-any.whl", hash = "sha256:0a6ea77f27f8cf092f907654f08302a97b48fa35f24e0ad99dfb74115f018161"},
+-    {file = "txredisapi-1.4.10.tar.gz", hash = "sha256:7609a6af6ff4619a3189c0adfb86aeda789afba69eb59fc1e19ac0199e725395"},
++    {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"},
++    {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"},
 ]
 
 [package.dependencies]
@@ -2767,13 +2913,14 @@ twisted = "*"
 
 [[package]]
 name = "types-bleach"
-version = "6.1.0.20240331"
+version = "6.2.0.20241123"
 description = "Typing stubs for bleach"
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
 files = [
-    {file = "types-bleach-6.1.0.20240331.tar.gz", hash = "sha256:2ee858a84fb06fc2225ff56ba2f7f6c88b65638659efae0d7bfd6b24a1b5a524"},
-    {file = "types_bleach-6.1.0.20240331-py3-none-any.whl", hash = "sha256:399bc59bfd20a36a56595f13f805e56c8a08e5a5c07903e5cf6fafb5a5107dd4"},
+    {file = "types_bleach-6.2.0.20241123-py3-none-any.whl", hash = "sha256:c6e58b3646665ca7c6b29890375390f4569e84f0cf5c171e0fe1ddb71a7be86a"},
+    {file = "types_bleach-6.2.0.20241123.tar.gz", hash = "sha256:dac5fe9015173514da3ac810c1a935619a3ccbcc5d66c4cbf4707eac00539057"},
 ]
 
 [package.dependencies]
@@ -2785,6 +2932,7 @@ version = "1.16.0.20240331"
 description = "Typing stubs for cffi"
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
 files = [
     {file = "types-cffi-1.16.0.20240331.tar.gz", hash = "sha256:b8b20d23a2b89cfed5f8c5bc53b0cb8677c3aac6d970dbc771e28b9c698f5dee"},
     {file = "types_cffi-1.16.0.20240331-py3-none-any.whl", hash = "sha256:a363e5ea54a4eb6a4a105d800685fde596bc318089b025b27dee09849fe41ff0"},
@@ -2799,6 +2947,7 @@ version = "0.9.2.20240106"
 description = "Typing stubs for commonmark"
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
 files = [
     {file = "types-commonmark-0.9.2.20240106.tar.gz", hash = "sha256:52a062b71766d6ab258fca2d8e19fb0853796e25ca9afa9d0f67a1e42c93479f"},
     {file = "types_commonmark-0.9.2.20240106-py3-none-any.whl", hash = "sha256:606d9de1e3a96cab0b1c0b6cccf4df099116148d1d864d115fde2e27ad6877c3"},
@@ -2810,6 +2959,7 @@ version = "1.1.11.20240228"
 description = "Typing stubs for html5lib"
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
 files = [
     {file = "types-html5lib-1.1.11.20240228.tar.gz", hash = "sha256:22736b7299e605ec4ba539d48691e905fd0c61c3ea610acc59922232dc84cede"},
     {file = "types_html5lib-1.1.11.20240228-py3-none-any.whl", hash = "sha256:af5de0125cb0fe5667543b158db83849b22e25c0e36c9149836b095548bf1020"},
@@ -2817,13 +2967,14 @@ files = [
 
 [[package]]
 name = "types-jsonschema"
-version = "4.23.0.20240712"
+version = "4.23.0.20250516"
 description = "Typing stubs for jsonschema"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
+groups = ["dev"]
 files = [
-    {file = "types-jsonschema-4.23.0.20240712.tar.gz", hash = "sha256:b20db728dcf7ea3e80e9bdeb55e8b8420c6c040cda14e8cf284465adee71d217"},
-    {file = "types_jsonschema-4.23.0.20240712-py3-none-any.whl", hash = "sha256:8c33177ce95336241c1d61ccb56a9964d4361b99d5f1cd81a1ab4909b0dd7cf4"},
+    {file = "types_jsonschema-4.23.0.20250516-py3-none-any.whl", hash = "sha256:e7d0dd7db7e59e63c26e3230e26ffc64c4704cc5170dc21270b366a35ead1618"},
+    {file = "types_jsonschema-4.23.0.20250516.tar.gz", hash = "sha256:9ace09d9d35c4390a7251ccd7d833b92ccc189d24d1b347f26212afce361117e"},
 ]
 
 [package.dependencies]
@@ -2835,6 +2986,7 @@ version = "1.3.0.20240530"
 description = "Typing stubs for netaddr"
 optional = false
python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-netaddr-1.3.0.20240530.tar.gz", hash = "sha256:742c2ec1f202b666f544223e2616b34f1f13df80c91e5aeaaa93a72e4d0774ea"}, {file = "types_netaddr-1.3.0.20240530-py3-none-any.whl", hash = "sha256:354998d018e326da4f1d9b005fc91137b7c2c473aaf03c4ef64bf83c6861b440"}, @@ -2846,6 +2998,7 @@ version = "2.4.10.6" description = "Typing stubs for opentracing" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "types-opentracing-2.4.10.6.tar.gz", hash = "sha256:87a1bdfce9de5e555e30497663583b9b9c3bb494d029ef9806aa1f137c19e744"}, {file = "types_opentracing-2.4.10.6-py3-none-any.whl", hash = "sha256:25914c834db033a4a38fc322df0b5e5e14503b0ac97f78304ae180d721555e97"}, @@ -2853,24 +3006,26 @@ files = [ [[package]] name = "types-pillow" -version = "10.2.0.20240520" +version = "10.2.0.20240822" description = "Typing stubs for Pillow" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"}, - {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"}, + {file = "types-Pillow-10.2.0.20240822.tar.gz", hash = "sha256:559fb52a2ef991c326e4a0d20accb3bb63a7ba8d40eb493e0ecb0310ba52f0d3"}, + {file = "types_Pillow-10.2.0.20240822-py3-none-any.whl", hash = "sha256:d9dab025aba07aeb12fd50a6799d4eac52a9603488eca09d7662543983f16c5d"}, ] [[package]] name = "types-psycopg2" -version = "2.9.21.20240417" +version = "2.9.21.20250318" description = "Typing stubs for psycopg2" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "types-psycopg2-2.9.21.20240417.tar.gz", hash = "sha256:05db256f4a459fb21a426b8e7fca0656c3539105ff0208eaf6bdaf406a387087"}, - {file = "types_psycopg2-2.9.21.20240417-py3-none-any.whl", hash = "sha256:644d6644d64ebbe37203229b00771012fb3b3bddd507a129a2e136485990e4f8"}, + {file = "types_psycopg2-2.9.21.20250318-py3-none-any.whl", hash = "sha256:7296d111ad950bbd2fc979a1ab0572acae69047f922280e77db657c00d2c79c0"}, + {file = "types_psycopg2-2.9.21.20250318.tar.gz", hash = "sha256:eb6eac5bfb16adfd5f16b818918b9e26a40ede147e0f2bbffdf53a6ef7025a87"}, ] [[package]] @@ -2879,6 +3034,7 @@ version = "24.1.0.20240722" description = "Typing stubs for pyOpenSSL" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-pyOpenSSL-24.1.0.20240722.tar.gz", hash = "sha256:47913b4678a01d879f503a12044468221ed8576263c1540dcb0484ca21b08c39"}, {file = "types_pyOpenSSL-24.1.0.20240722-py3-none-any.whl", hash = "sha256:6a7a5d2ec042537934cfb4c9d4deb0e16c4c6250b09358df1f083682fe6fda54"}, @@ -2890,24 +3046,26 @@ types-cffi = "*" [[package]] name = "types-pyyaml" -version = "6.0.12.20240311" +version = "6.0.12.20241230" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, + {file = "types_PyYAML-6.0.12.20241230-py3-none-any.whl", hash = "sha256:fa4d32565219b68e6dee5f67534c722e53c00d1cfc09c435ef04d7353e1e96e6"}, + {file = "types_pyyaml-6.0.12.20241230.tar.gz", hash = 
"sha256:7f07622dbd34bb9c8b264fe860a17e0efcad00d50b5f27e93984909d9363498c"}, ] [[package]] name = "types-requests" -version = "2.31.0.20240406" +version = "2.32.0.20250328" description = "Typing stubs for requests" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "types-requests-2.31.0.20240406.tar.gz", hash = "sha256:4428df33c5503945c74b3f42e82b181e86ec7b724620419a2966e2de604ce1a1"}, - {file = "types_requests-2.31.0.20240406-py3-none-any.whl", hash = "sha256:6216cdac377c6b9a040ac1c0404f7284bd13199c0e1bb235f4324627e8898cf5"}, + {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"}, + {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"}, ] [package.dependencies] @@ -2915,13 +3073,14 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "71.1.0.20240726" +version = "75.2.0.20241019" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "types-setuptools-71.1.0.20240726.tar.gz", hash = "sha256:85ba28e9461bb1be86ebba4db0f1c2408f2b11115b1966334ea9dc464e29303e"}, - {file = "types_setuptools-71.1.0.20240726-py3-none-any.whl", hash = "sha256:a7775376f36e0ff09bcad236bf265777590a66b11623e48c20bfc30f1444ea36"}, + {file = "types-setuptools-75.2.0.20241019.tar.gz", hash = "sha256:86ea31b5f6df2c6b8f2dc8ae3f72b213607f62549b6fa2ed5866e5299f968694"}, + {file = "types_setuptools-75.2.0.20241019-py3-none-any.whl", hash = "sha256:2e48ff3acd4919471e80d5e3f049cce5c177e108d5d36d2d4cee3fa4d4104258"}, ] [[package]] @@ -2930,17 +3089,34 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] +name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] name = "unpaddedbase64" version = "2.1.0" description = "Encode and decode Base64 without \"=\" padding" optional = false python-versions = ">=3.6,<4.0" +groups = ["main"] files = [ {file = "unpaddedbase64-2.1.0-py3-none-any.whl", hash = "sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6"}, {file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"}, @@ -2952,13 +3128,14 @@ version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." 
 optional = false
 python-versions = ">=3.8"
+groups = ["main", "dev"]
 files = [
     {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
     {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
 ]
 
 [package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
 h2 = ["h2 (>=4,<5)"]
 socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
 zstd = ["zstandard (>=0.18.0)"]
@@ -2969,6 +3146,7 @@ version = "0.5.1"
 description = "Character encoding aliases for legacy web content"
 optional = false
 python-versions = "*"
+groups = ["main", "dev"]
 files = [
     {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
     {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
@@ -2980,6 +3158,7 @@ version = "1.15.0"
 description = "Module for decorators, wrappers and monkey patching."
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+groups = ["dev"]
 files = [
     {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
     {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
@@ -3064,6 +3243,8 @@ version = "2.4.0"
 description = "An XML Schema validator and decoder"
 optional = true
 python-versions = ">=3.7"
+groups = ["main"]
+markers = "extra == \"all\" or extra == \"saml2\""
 files = [
     {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"},
     {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"},
@@ -3083,6 +3264,8 @@ version = "3.19.1"
 description = "Backport of pathlib-compatible object wrapper for zip files"
 optional = false
 python-versions = ">=3.8"
+groups = ["dev"]
+markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and python_version < \"3.12\" or python_version < \"3.10\""
 files = [
     {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"},
     {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"},
@@ -3098,6 +3281,7 @@ version = "4.6"
 description = "Very basic event publishing system"
 optional = false
 python-versions = "*"
+groups = ["dev"]
 files = [
     {file = "zope.event-4.6-py2.py3-none-any.whl", hash = "sha256:73d9e3ef750cca14816a9c322c7250b0d7c9dbc337df5d1b807ff8d3d0b9e97c"},
     {file = "zope.event-4.6.tar.gz", hash = "sha256:81d98813046fc86cc4136e3698fee628a3282f9c320db18658c21749235fce80"},
@@ -3112,50 +3296,58 @@ test = ["zope.testrunner"]
 
 [[package]]
 name = "zope-interface"
-version = "6.0"
+version = "7.1.0"
 description = "Interfaces for Python"
 optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "zope.interface-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f299c020c6679cb389814a3b81200fe55d428012c5e76da7e722491f5d205990"},
-    {file = "zope.interface-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee4b43f35f5dc15e1fec55ccb53c130adb1d11e8ad8263d68b1284b66a04190d"},
-    {file = "zope.interface-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a158846d0fca0a908c1afb281ddba88744d403f2550dc34405c3691769cdd85"},
-    {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72f23bab1848edb7472309e9898603141644faec9fd57a823ea6b4d1c4c8995"},
-    {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f4d38cf4b462e75fac78b6f11ad47b06b1c568eb59896db5b6ec1094eb467f"},
-    {file = "zope.interface-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:87b690bbee9876163210fd3f500ee59f5803e4a6607d1b1238833b8885ebd410"},
-    {file = "zope.interface-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2363e5fd81afb650085c6686f2ee3706975c54f331b426800b53531191fdf28"},
-    {file = "zope.interface-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af169ba897692e9cd984a81cb0f02e46dacdc07d6cf9fd5c91e81f8efaf93d52"},
-    {file = "zope.interface-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa90bac61c9dc3e1a563e5babb3fd2c0c1c80567e815442ddbe561eadc803b30"},
-    {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89086c9d3490a0f265a3c4b794037a84541ff5ffa28bb9c24cc9f66566968464"},
-    {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:809fe3bf1a91393abc7e92d607976bbb8586512913a79f2bf7d7ec15bd8ea518"},
-    {file = "zope.interface-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:0ec9653825f837fbddc4e4b603d90269b501486c11800d7c761eee7ce46d1bbb"},
-    {file = "zope.interface-6.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:790c1d9d8f9c92819c31ea660cd43c3d5451df1df61e2e814a6f99cebb292788"},
-    {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39b8711578dcfd45fc0140993403b8a81e879ec25d53189f3faa1f006087dca"},
-    {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eba51599370c87088d8882ab74f637de0c4f04a6d08a312dce49368ba9ed5c2a"},
-    {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee934f023f875ec2cfd2b05a937bd817efcc6c4c3f55c5778cbf78e58362ddc"},
-    {file = "zope.interface-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:042f2381118b093714081fd82c98e3b189b68db38ee7d35b63c327c470ef8373"},
-    {file = "zope.interface-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dfbbbf0809a3606046a41f8561c3eada9db811be94138f42d9135a5c47e75f6f"},
-    {file = "zope.interface-6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:424d23b97fa1542d7be882eae0c0fc3d6827784105264a8169a26ce16db260d8"},
-    {file = "zope.interface-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e538f2d4a6ffb6edfb303ce70ae7e88629ac6e5581870e66c306d9ad7b564a58"},
-    {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12175ca6b4db7621aedd7c30aa7cfa0a2d65ea3a0105393e05482d7a2d367446"},
-    {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3d7dfd897a588ec27e391edbe3dd320a03684457470415870254e714126b1f"},
-    {file = "zope.interface-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b3f543ae9d3408549a9900720f18c0194ac0fe810cecda2a584fd4dca2eb3bb8"},
-    {file = "zope.interface-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0583b75f2e70ec93f100931660328965bb9ff65ae54695fb3fa0a1255daa6f2"},
-    {file = "zope.interface-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:23ac41d52fd15dd8be77e3257bc51bbb82469cf7f5e9a30b75e903e21439d16c"},
-    {file = "zope.interface-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99856d6c98a326abbcc2363827e16bd6044f70f2ef42f453c0bd5440c4ce24e5"},
-    {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1592f68ae11e557b9ff2bc96ac8fc30b187e77c45a3c9cd876e3368c53dc5ba8"},
-    {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4407b1435572e3e1610797c9203ad2753666c62883b921318c5403fb7139dec2"},
-    {file = "zope.interface-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:5171eb073474a5038321409a630904fd61f12dd1856dd7e9d19cd6fe092cbbc5"},
-    {file = "zope.interface-6.0.tar.gz", hash = "sha256:aab584725afd10c710b8f1e6e208dbee2d0ad009f57d674cb9d1b3964037275d"},
+python-versions = ">=3.8"
+groups = ["main", "dev"]
+files = [
+    {file = "zope.interface-7.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2bd9e9f366a5df08ebbdc159f8224904c1c5ce63893984abb76954e6fbe4381a"},
+    {file = "zope.interface-7.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:661d5df403cd3c5b8699ac480fa7f58047a3253b029db690efa0c3cf209993ef"},
+    {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91b6c30689cfd87c8f264acb2fc16ad6b3c72caba2aec1bf189314cf1a84ca33"},
+    {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6a4924f5bad9fe21d99f66a07da60d75696a136162427951ec3cb223a5570d"},
+    {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a3c00b35f6170be5454b45abe2719ea65919a2f09e8a6e7b1362312a872cd3"},
+    {file = "zope.interface-7.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b936d61dbe29572fd2cfe13e30b925e5383bed1aba867692670f5a2a2eb7b4e9"},
+    {file = "zope.interface-7.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ac20581fc6cd7c754f6dff0ae06fedb060fa0e9ea6309d8be8b2701d9ea51c4"},
+    {file = "zope.interface-7.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:848b6fa92d7c8143646e64124ed46818a0049a24ecc517958c520081fd147685"},
+    {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1ef1fdb6f014d5886b97e52b16d0f852364f447d2ab0f0c6027765777b6667"},
+    {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bcff5c09d0215f42ba64b49205a278e44413d9bf9fa688fd9e42bfe472b5f4f"},
+    {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07add15de0cc7e69917f7d286b64d54125c950aeb43efed7a5ea7172f000fbc1"},
+    {file = "zope.interface-7.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:9940d5bc441f887c5f375ec62bcf7e7e495a2d5b1da97de1184a88fb567f06af"},
+    {file = "zope.interface-7.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f245d039f72e6f802902375755846f5de1ee1e14c3e8736c078565599bcab621"},
+    {file = "zope.interface-7.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6159e767d224d8f18deff634a1d3722e68d27488c357f62ebeb5f3e2f5288b1f"},
+    {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e956b1fd7f3448dd5e00f273072e73e50dfafcb35e4227e6d5af208075593c9"},
+    {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff115ef91c0eeac69cd92daeba36a9d8e14daee445b504eeea2b1c0b55821984"},
+    {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec001798ab62c3fc5447162bf48496ae9fba02edc295a9e10a0b0c639a6452e"},
+    {file = "zope.interface-7.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:124149e2d42067b9c6597f4dafdc7a0983d0163868f897b7bb5dc850b14f9a87"},
+    {file = "zope.interface-7.1.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:9733a9a0f94ef53d7aa64661811b20875b5bc6039034c6e42fb9732170130573"},
+    {file = "zope.interface-7.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5fcf379b875c610b5a41bc8a891841533f98de0520287d7f85e25386cd10d3e9"},
+    {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0a45b5af9f72c805ee668d1479480ca85169312211bed6ed18c343e39307d5f"},
+    {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af4a12b459a273b0b34679a5c3dc5e34c1847c3dd14a628aa0668e19e638ea2"},
+    {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a735f82d2e3ed47ca01a20dfc4c779b966b16352650a8036ab3955aad151ed8a"},
+    {file = "zope.interface-7.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:5501e772aff595e3c54266bc1bfc5858e8f38974ce413a8f1044aae0f32a83a3"},
+    {file = "zope.interface-7.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec59fe53db7d32abb96c6d4efeed84aab4a7c38c62d7a901a9b20c09dd936e7a"},
+    {file = "zope.interface-7.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e53c291debef523b09e1fe3dffe5f35dde164f1c603d77f770b88a1da34b7ed6"},
+    {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:711eebc77f2092c6a8b304bad0b81a6ce3cf5490b25574e7309fbc07d881e3af"},
+    {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a00ead2e24c76436e1b457a5132d87f83858330f6c923640b7ef82d668525d1"},
+    {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e28ea0bc4b084fc93a483877653a033062435317082cdc6388dec3438309faf"},
+    {file = "zope.interface-7.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:27cfb5205d68b12682b6e55ab8424662d96e8ead19550aad0796b08dd2c9a45e"},
+    {file = "zope.interface-7.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e3e48f3dea21c147e1b10c132016cb79af1159facca9736d231694ef5a740a8"},
+    {file = "zope.interface-7.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a99240b1d02dc469f6afbe7da1bf617645e60290c272968f4e53feec18d7dce8"},
+    {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc8a318162123eddbdf22fcc7b751288ce52e4ad096d3766ff1799244352449d"},
+    {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7b25db127db3e6b597c5f74af60309c4ad65acd826f89609662f0dc33a54728"},
+    {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a29ac607e970b5576547f0e3589ec156e04de17af42839eedcf478450687317"},
+    {file = "zope.interface-7.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a14c9decf0eb61e0892631271d500c1e306c7b6901c998c7035e194d9150fdd1"},
+    {file = "zope_interface-7.1.0.tar.gz", hash = "sha256:3f005869a1a05e368965adb2075f97f8ee9a26c61898a9e52a9764d93774f237"},
 ]
 
 [package.dependencies]
 setuptools = "*"
 
 [package.extras]
-docs = ["Sphinx", "repoze.sphinx.autointerface"]
-test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
-testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
+docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"]
+test = ["coverage[toml]", "zope.event", "zope.testing"]
+testing = ["coverage[toml]", "zope.event", "zope.testing"]
 
 [[package]]
 name = "zope-schema"
@@ -3163,6 +3355,7 @@ version = "7.0.1"
 description = "zope.interface extension for defining data schemas"
 optional = false
 python-versions = ">=3.7"
+groups = ["dev"]
 files = [
     {file = "zope.schema-7.0.1-py3-none-any.whl", hash = "sha256:cf006c678793b00e0075ad54d55281c8785ea21e5bc1f5ec0584787719c2aab2"},
     {file = "zope.schema-7.0.1.tar.gz", hash = "sha256:ead4dbcb03354d4e410c9a3b904451eb44d90254751b1cbdedf4a61aede9fbb9"},
@@ -3194,6 +3387,6 @@ url-preview = ["lxml"]
 user-search = ["pyicu"]
 
 [metadata]
-lock-version = "2.0"
-python-versions = "^3.8.0"
-content-hash = "c165cdc1f6612c9f1b5bfd8063c23e2d595d717dd8ac1a468519e902be2cdf93"
+lock-version = "2.1"
+python-versions = "^3.9.0"
+content-hash = "9824e42dfc0e128129ee0c8641f7fe639bf47574cdd3f052dd995941abc6e44b"
diff --git a/pylint.cfg b/pylint.cfg
deleted file mode 100644
index 2368997112..0000000000
--- a/pylint.cfg
+++ /dev/null
@@ -1,280 +0,0 @@
-[MASTER]
-
-# Specify a configuration file.
-#rcfile=
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-#init-hook=
-
-# Profiled execution.
-profile=no
-
-# Add files or directories to the blacklist. They should be base names, not
-# paths.
-ignore=CVS
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-load-plugins=
-
-
-[MESSAGES CONTROL]
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time. See also the "--disable" option for examples.
-#enable=
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then reenable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use"--disable=all --enable=classes
-# --disable=W"
-disable=missing-docstring
-
-
-[REPORTS]
-
-# Set the output format. Available formats are text, parseable, colorized, msvs
-# (visual studio) and html. You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Put messages in a separate file for each module / package specified on the
-# command line instead of printing them on stdout. Reports (if any) will be
-# written in a file name "pylint_global.[txt|html]".
-files-output=no
-
-# Tells whether to display a full report or only the messages
-reports=yes
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables errors warning, statement which
-# respectively contain the number of errors / warnings messages and the total
-# number of statements analyzed. This is used by the global evaluation report
-# (RP0004).
-evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-comment=no
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-
-[TYPECHECK]
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set).
-ignored-classes=SQLObject
-
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-zope=no
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E0201 when accessed. Python regular
-# expressions are accepted.
-generated-members=REQUEST,acl_users,aq_parent
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO
-
-
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-
-[VARIABLES]
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# A regular expression matching the beginning of the name of dummy variables
-# (i.e. not used).
-dummy-variables-rgx=_$|dummy
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-
-[BASIC]
-
-# Required attributes for module, separated by a comma
-required-attributes=
-
-# List of builtins function names that should not be used, separated by a comma
-bad-functions=map,filter,apply,input
-
-# Regular expression which should only match correct module names
-module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-
-# Regular expression which should only match correct module level names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
-
-# Regular expression which should only match correct class names
-class-rgx=[A-Z_][a-zA-Z0-9]+$
-
-# Regular expression which should only match correct function names
-function-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct instance attribute names
-attr-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct argument names
-argument-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct variable names
-variable-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Regular expression which should only match correct attribute names in class
-# bodies
-class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
-
-# Regular expression which should only match correct list comprehension /
-# generator expression variable names
-inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,Run,_
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,bar,baz,toto,tutu,tata
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=__.*__
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-
-[FORMAT]
-
-# Maximum number of characters on a single line.
-max-line-length=80
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-# List of optional constructs for which whitespace checking is disabled
-no-space-check=trailing-comma,dict-separator
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=5
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-ignored-argument-names=_.*
-
-# Maximum number of locals for function / method body
-max-locals=15
-
-# Maximum number of return / yield for function / method body
-max-returns=6
-
-# Maximum number of branch for function / method body
-max-branches=12
-
-# Maximum number of statements in function / method body
-max-statements=50
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-
-[IMPORTS]
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,TERMIOS,Bastion,rexec
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-
-[CLASSES]
-
-# List of interface methods to ignore, separated by a comma. This is used for
-# instance to not check methods defines in Zope's Interface base class.
-ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception
diff --git a/pyproject.toml b/pyproject.toml
index c29d1534fb..ed900288b4 100644 --- a/pyproject.toml +++ b/pyproject.toml
@@ -34,14 +34,9 @@ name = "Internal Changes" showcontent = true -[tool.black] -target-version = ['py38', 'py39', 'py310', 'py311'] -# black ignores everything in .gitignore by default, see -# https://black.readthedocs.io/en/stable/usage_and_configuration/file_collection_and_discovery.html#gitignore -# Use `extend-exclude` if you want to exclude something in addition to this. - [tool.ruff] line-length = 88 +target-version = "py39" [tool.ruff.lint] # See https://beta.ruff.rs/docs/rules/#error-e @@ -63,6 +58,8 @@ select = [ "W", # pyflakes "F", + # isort + "I001", # flake8-bugbear "B0", # flake8-comprehensions @@ -79,17 +76,20 @@ select = [ "EXE", ] -[tool.isort] -line_length = 88 -sections = ["FUTURE", "STDLIB", "THIRDPARTY", "TWISTED", "FIRSTPARTY", "TESTS", "LOCALFOLDER"] -default_section = "THIRDPARTY" -known_first_party = ["synapse"] -known_tests = ["tests"] -known_twisted = ["twisted", "OpenSSL"] -multi_line_output = 3 -include_trailing_comma = true -combine_as_imports = true -skip_gitignore = true +[tool.ruff.lint.isort] +combine-as-imports = true +section-order = ["future", "standard-library", "third-party", "twisted", "first-party", "testing", "local-folder"] +known-first-party = ["synapse"] + +[tool.ruff.lint.isort.sections] +twisted = ["twisted", "OpenSSL"] +testing = ["tests"] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" +skip-magic-trailing-comma = false +line-ending = "auto" [tool.maturin] manifest-path = "rust/Cargo.toml" @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.113.0rc1" +version = "1.132.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "AGPL-3.0-or-later" @@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main" update_synapse_database = "synapse._scripts.update_synapse_database:main" [tool.poetry.dependencies] -python = "^3.8.0" +python = "^3.9.0" # Mandatory Dependencies # ---------------------- @@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"} treq = ">=15.1" # Twisted has required pyopenssl 16.0 since about Twisted 16.6. pyOpenSSL = ">=16.0.0" -PyYAML = ">=3.13" +PyYAML = ">=5.3" pyasn1 = ">=0.1.9" pyasn1-modules = ">=0.0.7" bcrypt = ">=3.1.7" @@ -234,14 +234,13 @@ matrix-synapse-ldap3 = { version = ">=0.1", optional = true } psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'PyPy'", optional = true } psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true } psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true } -pysaml2 = { version = ">=4.5.0", optional = true } authlib = { version = ">=0.15.1", optional = true } # systemd-python is necessary for logging to the systemd journal via # `systemd.journal.JournalHandler`, as is documented in # `contrib/systemd/log_config.yaml`. # Note: systemd-python 231 appears to have been yanked from pypi systemd-python = { version = ">=231", optional = true } -lxml = { version = ">=4.2.0", optional = true } +lxml = { version = ">=4.5.2", optional = true } sentry-sdk = { version = ">=0.7.2", optional = true } opentracing = { version = ">=2.2.0", optional = true } jaeger-client = { version = ">=4.0.0", optional = true } @@ -257,7 +256,6 @@ pyicu = { version = ">=2.10.2", optional = true } # twice: once here, and once in the `all` extra. 
matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -saml2 = ["pysaml2"] oidc = ["authlib"] # systemd-python is necessary for logging to the systemd journal via # `systemd.journal.JournalHandler`, as is documented in @@ -294,8 +292,6 @@ all = [ "matrix-synapse-ldap3", # postgres "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", - # saml2 - "pysaml2", # oidc and jwt "authlib", # url-preview @@ -320,9 +316,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -isort = ">=5.10.1" -black = ">=22.7.0" -ruff = "0.5.5" +ruff = "0.11.11" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" @@ -372,7 +366,7 @@ tomli = ">=1.2.3" # runtime errors caused by build system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"] +requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"] build-backend = "poetry.core.masonry.api" @@ -380,16 +374,22 @@ build-backend = "poetry.core.masonry.api" # Skip unsupported platforms (by us or by Rust). # See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets. # We skip: -# - CPython 3.6 and 3.7: EOLed -# - PyPy 3.7: we only support Python 3.8+ +# - CPython 3.6, 3.7 and 3.8: EOLed +# - PyPy 3.7 and 3.8: we only support Python 3.9+ # - musllinux i686: excluded to reduce number of wheels we build. # c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677 # - PyPy on Aarch64 and musllinux on aarch64: too slow to build. # c.f. https://github.com/matrix-org/synapse/pull/14259 -skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" +skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" +# Enable non-default builds. +# "pypy" used to be included by default up until cibuildwheel 3. +enable = "pypy" -# We need a rust compiler -before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal" +# We need a rust compiler. +# +# We temporarily pin Rust to 1.82.0 to work around +# https://github.com/element-hq/synapse/issues/17988 +before-all = "sh .ci/before_build_wheel.sh" environment= { PATH = "$PATH:$HOME/.cargo/bin" } # For some reason if we don't manually clean the build directory we diff --git a/rust/Cargo.toml b/rust/Cargo.toml
index 026487275c..840988e74e 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml
@@ -30,14 +30,14 @@ http = "1.1.0" lazy_static = "1.4.0" log = "0.4.17" mime = "0.3.17" -pyo3 = { version = "0.21.0", features = [ +pyo3 = { version = "0.24.2", features = [ "macros", "anyhow", "abi3", - "abi3-py38", + "abi3-py39", ] } -pyo3-log = "0.10.0" -pythonize = "0.21.0" +pyo3-log = "0.12.0" +pythonize = "0.24.0" regex = "1.6.0" sha2 = "0.10.8" serde = { version = "1.0.144", features = ["derive"] } diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs
index 4fea035b96..28537e187e 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs
@@ -60,6 +60,7 @@ fn bench_match_exact(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -105,6 +106,7 @@ fn bench_match_word(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -150,6 +152,7 @@ fn bench_match_word_miss(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -195,6 +198,7 @@ fn bench_eval_message(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -205,6 +209,7 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, + false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/acl/mod.rs b/rust/src/acl/mod.rs
index 982720ba90..57b45475fd 100644 --- a/rust/src/acl/mod.rs +++ b/rust/src/acl/mod.rs
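The hunk below is the first of several mechanical updates for the pyo3 bump above (0.21 → 0.24.2): the deprecated `*_bound` constructors are replaced by `PyModule::new` and `py.import`, which now return `Bound` handles directly. A minimal sketch of the resulting registration pattern, assuming pyo3 0.23+ ("parent"/"child" are illustrative module names, not Synapse's):

    use pyo3::prelude::*;
    use pyo3::types::PyModule;

    pub fn register_child(py: Python<'_>, parent: &Bound<'_, PyModule>) -> PyResult<()> {
        // `PyModule::new` replaces `PyModule::new_bound`.
        let child = PyModule::new(py, "child")?;
        parent.add_submodule(&child)?;

        // Submodules still need a manual sys.modules entry so that
        // `from parent import child` resolves; `py.import` replaces
        // `py.import_bound`.
        py.import("sys")?
            .getattr("modules")?
            .set_item("parent.child", child)?;
        Ok(())
    }

The same rename is applied to the `events`, `push` and `rendezvous` registration functions further down.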
@@ -32,14 +32,14 @@ use crate::push::utils::{glob_to_regex, GlobMatchType}; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "acl")?; + let child_module = PyModule::new(py, "acl")?; child_module.add_class::<ServerAclEvaluator>()?; m.add_submodule(&child_module)?; // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import acl` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? .set_item("synapse.synapse_rust.acl", child_module)?; diff --git a/rust/src/events/filter.rs b/rust/src/events/filter.rs new file mode 100644
index 0000000000..7e39972c62 --- /dev/null +++ b/rust/src/events/filter.rs
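The new module below moves federation event-visibility filtering into Rust. Note that `event_visible_to_server` returns an error, rather than `false`, when handed a membership whose `state_key` is not on the target server, so callers are expected to pre-filter the membership list. A hedged usage sketch, assuming the crate is importable as `synapse` (matching the module tree in `rust/src/lib.rs`) and that `anyhow` is available, as it is for the crate itself:

    use std::collections::HashMap;

    use synapse::events::filter::event_visible_to_server;

    fn main() -> anyhow::Result<()> {
        // With "joined" history visibility, the event is visible only if the
        // target server had a joined member at that point in the room.
        let visible = event_visible_to_server(
            "@alice:example.org".to_string(), // sender
            "bob.example.net".to_string(),    // target server name
            "joined".to_string(),             // history visibility at the event
            HashMap::new(),                   // no erased senders
            false,                            // room is fully stated
            // Must already be filtered to the target server's users.
            vec![("@bob:bob.example.net".to_string(), "join".to_string())],
        )?;
        assert!(visible);
        Ok(())
    }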
@@ -0,0 +1,107 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2024 New Vector, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * <https://www.gnu.org/licenses/agpl-3.0.html>. + */ + +use std::collections::HashMap; + +use pyo3::{exceptions::PyValueError, pyfunction, PyResult}; + +use crate::{ + identifier::UserID, + matrix_const::{ + HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN, + }, +}; + +#[pyfunction(name = "event_visible_to_server")] +pub fn event_visible_to_server_py( + sender: String, + target_server_name: String, + history_visibility: String, + erased_senders: HashMap<String, bool>, + partial_state_invisible: bool, + memberships: Vec<(String, String)>, // (state_key, membership) +) -> PyResult<bool> { + event_visible_to_server( + sender, + target_server_name, + history_visibility, + erased_senders, + partial_state_invisible, + memberships, + ) + .map_err(|e| PyValueError::new_err(format!("{e}"))) +} + +/// Return whether the target server is allowed to see the event. +/// +/// For a fully stated room, the target server is allowed to see an event E if: +/// - the state at E has world readable or shared history vis, OR +/// - the state at E says that the target server is in the room. +/// +/// For a partially stated room, the target server is allowed to see E if: +/// - E was created by this homeserver, AND: +/// - the partial state at E has world readable or shared history vis, OR +/// - the partial state at E says that the target server is in the room. +pub fn event_visible_to_server( + sender: String, + target_server_name: String, + history_visibility: String, + erased_senders: HashMap<String, bool>, + partial_state_invisible: bool, + memberships: Vec<(String, String)>, // (state_key, membership) +) -> anyhow::Result<bool> { + if let Some(&erased) = erased_senders.get(&sender) { + if erased { + return Ok(false); + } + } + + if partial_state_invisible { + return Ok(false); + } + + if history_visibility != HISTORY_VISIBILITY_INVITED + && history_visibility != HISTORY_VISIBILITY_JOINED + { + return Ok(true); + } + + let mut visible = false; + for (state_key, membership) in memberships { + let state_key = UserID::try_from(state_key.as_ref()) + .map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?; + if state_key.server_name() != target_server_name { + return Err(anyhow::anyhow!( + "state_key.server_name ({}) does not match target_server_name ({target_server_name})", + state_key.server_name() + )); + } + + match membership.as_str() { + MEMBERSHIP_INVITE => { + if history_visibility == HISTORY_VISIBILITY_INVITED { + visible = true; + break; + } + } + MEMBERSHIP_JOIN => { + visible = true; + break; + } + _ => continue, + } + } + + Ok(visible) +} diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs
index ad87825f16..eeb6074c10 100644 --- a/rust/src/events/internal_metadata.rs +++ b/rust/src/events/internal_metadata.rs
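The bulk of the hunk below is the `IntoPy` → `IntoPyObject` migration from pyo3 0.23+: `into_py` never failed, whereas `into_pyobject` returns a `Result`, and for `bool` it yields a borrowed handle to an interned Python bool that must be upgraded before being erased to `PyAny`. A condensed sketch of that one pattern, assuming pyo3 0.23+:

    use pyo3::prelude::*;

    fn bool_to_any<'py>(py: Python<'py>, flag: bool) -> Bound<'py, PyAny> {
        // For `bool` the conversion error type is `Infallible`; the diff
        // unwraps it via the new `unwrap_infallible()` helper (see
        // rust/src/lib.rs below) instead of a bare `unwrap()`.
        flag.into_pyobject(py).unwrap().to_owned().into_any()
    }

`PyDict::new_bound` → `PyDict::new` at the end of the hunk is the same rename as in `acl/mod.rs`.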
@@ -41,9 +41,11 @@ use pyo3::{ pybacked::PyBackedStr, pyclass, pymethods, types::{PyAnyMethods, PyDict, PyDictMethods, PyString}, - Bound, IntoPy, PyAny, PyObject, PyResult, Python, + Bound, IntoPyObject, PyAny, PyObject, PyResult, Python, }; +use crate::UnwrapInfallible; + /// Definitions of the various fields of the internal metadata. #[derive(Clone)] enum EventInternalMetadataData { @@ -60,31 +62,59 @@ enum EventInternalMetadataData { impl EventInternalMetadataData { /// Convert the field to its name and python object. - fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) { + fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, Bound<'a, PyAny>) { match self { - EventInternalMetadataData::OutOfBandMembership(o) => { - (pyo3::intern!(py, "out_of_band_membership"), o.into_py(py)) - } - EventInternalMetadataData::SendOnBehalfOf(o) => { - (pyo3::intern!(py, "send_on_behalf_of"), o.into_py(py)) - } - EventInternalMetadataData::RecheckRedaction(o) => { - (pyo3::intern!(py, "recheck_redaction"), o.into_py(py)) - } - EventInternalMetadataData::SoftFailed(o) => { - (pyo3::intern!(py, "soft_failed"), o.into_py(py)) - } - EventInternalMetadataData::ProactivelySend(o) => { - (pyo3::intern!(py, "proactively_send"), o.into_py(py)) - } - EventInternalMetadataData::Redacted(o) => { - (pyo3::intern!(py, "redacted"), o.into_py(py)) - } - EventInternalMetadataData::TxnId(o) => (pyo3::intern!(py, "txn_id"), o.into_py(py)), - EventInternalMetadataData::TokenId(o) => (pyo3::intern!(py, "token_id"), o.into_py(py)), - EventInternalMetadataData::DeviceId(o) => { - (pyo3::intern!(py, "device_id"), o.into_py(py)) - } + EventInternalMetadataData::OutOfBandMembership(o) => ( + pyo3::intern!(py, "out_of_band_membership"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::SendOnBehalfOf(o) => ( + pyo3::intern!(py, "send_on_behalf_of"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), + EventInternalMetadataData::RecheckRedaction(o) => ( + pyo3::intern!(py, "recheck_redaction"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::SoftFailed(o) => ( + pyo3::intern!(py, "soft_failed"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::ProactivelySend(o) => ( + pyo3::intern!(py, "proactively_send"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::Redacted(o) => ( + pyo3::intern!(py, "redacted"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::TxnId(o) => ( + pyo3::intern!(py, "txn_id"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), + EventInternalMetadataData::TokenId(o) => ( + pyo3::intern!(py, "token_id"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), + EventInternalMetadataData::DeviceId(o) => ( + pyo3::intern!(py, "device_id"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), } } @@ -247,7 +277,7 @@ impl EventInternalMetadata { /// /// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here. fn get_dict(&self, py: Python<'_>) -> PyResult<PyObject> { - let dict = PyDict::new_bound(py); + let dict = PyDict::new(py); for entry in &self.data { let (key, value) = entry.to_python_pair(py); diff --git a/rust/src/events/mod.rs b/rust/src/events/mod.rs
index a4ade1a178..209efb917b 100644 --- a/rust/src/events/mod.rs +++ b/rust/src/events/mod.rs
@@ -22,21 +22,23 @@ use pyo3::{ types::{PyAnyMethods, PyModule, PyModuleMethods}, - Bound, PyResult, Python, + wrap_pyfunction, Bound, PyResult, Python, }; +pub mod filter; mod internal_metadata; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "events")?; + let child_module = PyModule::new(py, "events")?; child_module.add_class::<internal_metadata::EventInternalMetadata>()?; + child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?; m.add_submodule(&child_module)?; // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import events` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? .set_item("synapse.synapse_rust.events", child_module)?; diff --git a/rust/src/http.rs b/rust/src/http.rs
index af052ab721..63ed05be54 100644 --- a/rust/src/http.rs +++ b/rust/src/http.rs
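A one-line consequence of the same upgrade: pyo3 0.23 renamed `PyAnyMethods::iter` to `try_iter`, reflecting that obtaining a Python iterator can fail. A small sketch of the renamed call, using an illustrative (non-Synapse) function:

    use pyo3::prelude::*;

    fn collect_strings(obj: &Bound<'_, PyAny>) -> PyResult<Vec<String>> {
        let mut out = Vec::new();
        // `try_iter` replaces the old `iter`; each item is itself a PyResult.
        for item in obj.try_iter()? {
            out.push(item?.extract::<String>()?);
        }
        Ok(out)
    }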
@@ -70,7 +70,7 @@ pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult<Request let headers_iter = request .getattr("requestHeaders")? .call_method0("getAllRawHeaders")? - .iter()?; + .try_iter()?; for header in headers_iter { let header = header?; diff --git a/rust/src/identifier.rs b/rust/src/identifier.rs new file mode 100644
index 0000000000..b70f6a30c7 --- /dev/null +++ b/rust/src/identifier.rs
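The new module below provides minimal Matrix identifier types for the Rust side; parsing only checks the sigil and the presence of a colon, not the full identifier grammar. `UserID::server_name()` is what `events/filter.rs` uses to compare memberships against the target server. A hedged usage sketch, again assuming the crate is importable as `synapse`:

    use synapse::identifier::{IdentifierError, UserID};

    fn main() {
        let user = UserID::try_from("@alice:example.org").expect("well-formed user_id");
        assert_eq!(user.localpart(), "alice");
        assert_eq!(user.server_name(), "example.org");

        // A missing '@' sigil is rejected...
        assert_eq!(
            UserID::try_from("alice:example.org"),
            Err(IdentifierError::IncorrectSigil)
        );
        // ...but no deeper validation is performed.
        assert!(UserID::try_from("@:").is_ok());
    }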
@@ -0,0 +1,252 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2024 New Vector, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * <https://www.gnu.org/licenses/agpl-3.0.html>. + */ + +//! # Matrix Identifiers +//! +//! This module contains definitions and utilities for working with matrix identifiers. + +use std::{fmt, ops::Deref}; + +/// Errors that can occur when parsing a matrix identifier. +#[derive(Clone, Debug, PartialEq)] +pub enum IdentifierError { + IncorrectSigil, + MissingColon, +} + +impl fmt::Display for IdentifierError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// A Matrix user_id. +#[derive(Clone, Debug, PartialEq)] +pub struct UserID(String); + +impl UserID { + /// Returns the `localpart` of the user_id. + pub fn localpart(&self) -> &str { + &self[1..self.colon_pos()] + } + + /// Returns the `server_name` / `domain` of the user_id. + pub fn server_name(&self) -> &str { + &self[self.colon_pos() + 1..] + } + + /// Returns the position of the ':' inside of the user_id. + /// Used when splitting the user_id into it's respective parts. + fn colon_pos(&self) -> usize { + self.find(':').unwrap() + } +} + +impl TryFrom<&str> for UserID { + type Error = IdentifierError; + + /// Will try creating a `UserID` from the provided `&str`. + /// Can fail if the user_id is incorrectly formatted. + fn try_from(s: &str) -> Result<Self, Self::Error> { + if !s.starts_with('@') { + return Err(IdentifierError::IncorrectSigil); + } + + if s.find(':').is_none() { + return Err(IdentifierError::MissingColon); + } + + Ok(UserID(s.to_string())) + } +} + +impl TryFrom<String> for UserID { + type Error = IdentifierError; + + /// Will try creating a `UserID` from the provided `&str`. + /// Can fail if the user_id is incorrectly formatted. + fn try_from(s: String) -> Result<Self, Self::Error> { + if !s.starts_with('@') { + return Err(IdentifierError::IncorrectSigil); + } + + if s.find(':').is_none() { + return Err(IdentifierError::MissingColon); + } + + Ok(UserID(s)) + } +} + +impl<'de> serde::Deserialize<'de> for UserID { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + let s: String = serde::Deserialize::deserialize(deserializer)?; + UserID::try_from(s).map_err(serde::de::Error::custom) + } +} + +impl Deref for UserID { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl fmt::Display for UserID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// A Matrix room_id. +#[derive(Clone, Debug, PartialEq)] +pub struct RoomID(String); + +impl RoomID { + /// Returns the `localpart` of the room_id. + pub fn localpart(&self) -> &str { + &self[1..self.colon_pos()] + } + + /// Returns the `server_name` / `domain` of the room_id. + pub fn server_name(&self) -> &str { + &self[self.colon_pos() + 1..] + } + + /// Returns the position of the ':' inside of the room_id. + /// Used when splitting the room_id into it's respective parts. 
+ fn colon_pos(&self) -> usize { + self.find(':').unwrap() + } +} + +impl TryFrom<&str> for RoomID { + type Error = IdentifierError; + + /// Will try creating a `RoomID` from the provided `&str`. + /// Can fail if the room_id is incorrectly formatted. + fn try_from(s: &str) -> Result<Self, Self::Error> { + if !s.starts_with('!') { + return Err(IdentifierError::IncorrectSigil); + } + + if s.find(':').is_none() { + return Err(IdentifierError::MissingColon); + } + + Ok(RoomID(s.to_string())) + } +} + +impl TryFrom<String> for RoomID { + type Error = IdentifierError; + + /// Will try creating a `RoomID` from the provided `String`. + /// Can fail if the room_id is incorrectly formatted. + fn try_from(s: String) -> Result<Self, Self::Error> { + if !s.starts_with('!') { + return Err(IdentifierError::IncorrectSigil); + } + + if s.find(':').is_none() { + return Err(IdentifierError::MissingColon); + } + + Ok(RoomID(s)) + } +} + +impl<'de> serde::Deserialize<'de> for RoomID { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + let s: String = serde::Deserialize::deserialize(deserializer)?; + RoomID::try_from(s).map_err(serde::de::Error::custom) + } +} + +impl Deref for RoomID { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl fmt::Display for RoomID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// A Matrix event_id. +#[derive(Clone, Debug, PartialEq)] +pub struct EventID(String); + +impl TryFrom<&str> for EventID { + type Error = IdentifierError; + + /// Will try creating a `EventID` from the provided `&str`. + /// Can fail if the event_id is incorrectly formatted. + fn try_from(s: &str) -> Result<Self, Self::Error> { + if !s.starts_with('$') { + return Err(IdentifierError::IncorrectSigil); + } + + Ok(EventID(s.to_string())) + } +} + +impl TryFrom<String> for EventID { + type Error = IdentifierError; + + /// Will try creating a `EventID` from the provided `String`. + /// Can fail if the event_id is incorrectly formatted. + fn try_from(s: String) -> Result<Self, Self::Error> { + if !s.starts_with('$') { + return Err(IdentifierError::IncorrectSigil); + } + + Ok(EventID(s)) + } +} + +impl<'de> serde::Deserialize<'de> for EventID { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + let s: String = serde::Deserialize::deserialize(deserializer)?; + EventID::try_from(s).map_err(serde::de::Error::custom) + } +} + +impl Deref for EventID { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl fmt::Display for EventID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/rust/src/lib.rs b/rust/src/lib.rs
index 06477880b9..d751889874 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs
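Alongside wiring up the new modules, the hunk below adds the `UnwrapInfallible` helper used by the pyo3 conversions above: many `into_pyobject` calls have `Error = Infallible`, and matching on the empty enum documents that there is genuinely no panic path, unlike a bare `unwrap()`. A tiny usage sketch, assuming the crate is importable as `synapse`:

    use std::convert::Infallible;

    use synapse::UnwrapInfallible;

    fn main() {
        // A Result whose error type has no values can always be unwrapped.
        let always_ok: Result<u32, Infallible> = Ok(7);
        assert_eq!(always_ok.unwrap_infallible(), 7);
    }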
@@ -1,3 +1,5 @@ +use std::convert::Infallible; + use lazy_static::lazy_static; use pyo3::prelude::*; use pyo3_log::ResetHandle; @@ -6,6 +8,8 @@ pub mod acl; pub mod errors; pub mod events; pub mod http; +pub mod identifier; +pub mod matrix_const; pub mod push; pub mod rendezvous; @@ -50,3 +54,16 @@ fn synapse_rust(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { Ok(()) } + +pub trait UnwrapInfallible<T> { + fn unwrap_infallible(self) -> T; +} + +impl<T> UnwrapInfallible<T> for Result<T, Infallible> { + fn unwrap_infallible(self) -> T { + match self { + Ok(val) => val, + Err(never) => match never {}, + } + } +} diff --git a/rust/src/matrix_const.rs b/rust/src/matrix_const.rs new file mode 100644
index 0000000000..f75f3bd7c3 --- /dev/null +++ b/rust/src/matrix_const.rs
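The small module below centralises spec-defined strings so that `events/filter.rs` can compare against named constants rather than string literals. For illustration only (assuming the `synapse` crate path as before):

    use synapse::matrix_const::{HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED};

    // Mirrors the check in event_visible_to_server: only these two history
    // visibility values restrict who may see an event.
    fn is_restricted_visibility(history_visibility: &str) -> bool {
        history_visibility == HISTORY_VISIBILITY_INVITED
            || history_visibility == HISTORY_VISIBILITY_JOINED
    }

    fn main() {
        assert!(is_restricted_visibility("joined"));
        assert!(!is_restricted_visibility("world_readable"));
    }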
@@ -0,0 +1,28 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2024 New Vector, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * <https://www.gnu.org/licenses/agpl-3.0.html>. + */ + +//! # Matrix Constants +//! +//! This module contains definitions for constant values described by the matrix specification. + +pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable"; +pub const HISTORY_VISIBILITY_SHARED: &str = "shared"; +pub const HISTORY_VISIBILITY_INVITED: &str = "invited"; +pub const HISTORY_VISIBILITY_JOINED: &str = "joined"; + +pub const MEMBERSHIP_BAN: &str = "ban"; +pub const MEMBERSHIP_LEAVE: &str = "leave"; +pub const MEMBERSHIP_KNOCK: &str = "knock"; +pub const MEMBERSHIP_INVITE: &str = "invite"; +pub const MEMBERSHIP_JOIN: &str = "join"; diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs
index 74f02d6001..e0832ada1c 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs
@@ -81,7 +81,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ ))]), actions: Cow::Borrowed(&[Action::Notify]), default: true, - default_enabled: false, + default_enabled: true, }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs
index 2f4b6d47bb..db406acb88 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs
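Two things happen in the hunk below: the evaluator grows an `msc4210_enabled` flag, and the `run`/`matches` methods gain explicit `#[pyo3(signature = ...)]` attributes, which newer pyo3 requires for `Option` arguments that should default to `None` on the Python side. The gating logic itself is simple; a standalone restatement of the condition the hunk introduces:

    // Legacy mention rules are skipped either when the event carries an
    // `m.mentions` property (existing behaviour) or, with MSC4210, always.
    fn skip_legacy_mention_rule(has_mentions: bool, msc4210_enabled: bool, rule_id: &str) -> bool {
        (has_mentions || msc4210_enabled)
            && (rule_id == "global/override/.m.rule.contains_display_name"
                || rule_id == "global/content/.m.rule.contains_user_name"
                || rule_id == "global/override/.m.rule.roomnotif")
    }

    fn main() {
        assert!(skip_legacy_mention_rule(false, true, "global/override/.m.rule.roomnotif"));
        assert!(!skip_legacy_mention_rule(false, false, "global/override/.m.rule.roomnotif"));
    }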
@@ -105,6 +105,9 @@ pub struct PushRuleEvaluator { /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same /// flag as MSC1767 (extensible events core). msc3931_enabled: bool, + + // If MSC4210 (remove legacy mentions) is enabled. + msc4210_enabled: bool, } #[pymethods] @@ -122,6 +125,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc4210_enabled, ))] pub fn py_new( flattened_keys: BTreeMap<String, JsonValue>, @@ -133,6 +137,7 @@ impl PushRuleEvaluator { related_event_match_enabled: bool, room_version_feature_flags: Vec<String>, msc3931_enabled: bool, + msc4210_enabled: bool, ) -> Result<Self, Error> { let body = match flattened_keys.get("content.body") { Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(), @@ -150,6 +155,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc4210_enabled, }) } @@ -161,6 +167,7 @@ impl PushRuleEvaluator { /// /// Returns the set of actions, if any, that match (filtering out any /// `dont_notify` and `coalesce` actions). + #[pyo3(signature = (push_rules, user_id=None, display_name=None))] pub fn run( &self, push_rules: &FilteredPushRules, @@ -176,7 +183,8 @@ impl PushRuleEvaluator { // For backwards-compatibility the legacy mention rules are disabled // if the event contains the 'm.mentions' property. - if self.has_mentions + // Additionally, MSC4210 always disables the legacy rules. + if (self.has_mentions || self.msc4210_enabled) && (rule_id == "global/override/.m.rule.contains_display_name" || rule_id == "global/content/.m.rule.contains_user_name" || rule_id == "global/override/.m.rule.roomnotif") @@ -229,6 +237,7 @@ impl PushRuleEvaluator { } /// Check if the given condition matches. + #[pyo3(signature = (condition, user_id=None, display_name=None))] fn matches( &self, condition: Condition, @@ -526,6 +535,7 @@ fn push_rule_evaluator() { true, vec![], true, + false, ) .unwrap(); @@ -555,6 +565,7 @@ fn test_requires_room_version_supports_condition() { false, flags, true, + false, ) .unwrap(); @@ -582,7 +593,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs
index 2a452b69a3..bd0e853ac3 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs
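Besides the `PyLong` → `PyInt` rename and the switch from `depythonize_bound` to `depythonize`, the interesting change in the hunk below is that the serde-backed conversions now implement `IntoPyObject`, so a `pythonize` failure surfaces through the trait's associated `Error` type instead of panicking via `expect`. A self-contained sketch of the pattern with a hypothetical serde type (not Synapse's `Action`):

    use pyo3::prelude::*;
    use pythonize::{pythonize, PythonizeError};
    use serde::Serialize;

    #[derive(Serialize)]
    struct ExampleAction {
        set_tweak: String,
    }

    impl<'py> IntoPyObject<'py> for ExampleAction {
        type Target = PyAny;
        type Output = Bound<'py, PyAny>;
        type Error = PythonizeError;

        fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> {
            // Serialize through serde into the Python object graph.
            pythonize(py, &self)
        }
    }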
@@ -65,8 +65,8 @@ use anyhow::{Context, Error}; use log::warn; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; -use pyo3::types::{PyBool, PyList, PyLong, PyString}; -use pythonize::{depythonize_bound, pythonize}; +use pyo3::types::{PyBool, PyInt, PyList, PyString}; +use pythonize::{depythonize, pythonize, PythonizeError}; use serde::de::Error as _; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -79,7 +79,7 @@ pub mod utils; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "push")?; + let child_module = PyModule::new(py, "push")?; child_module.add_class::<PushRule>()?; child_module.add_class::<PushRules>()?; child_module.add_class::<FilteredPushRules>()?; @@ -90,7 +90,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import push` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? .set_item("synapse.synapse_rust.push", child_module)?; @@ -182,12 +182,16 @@ pub enum Action { Unknown(Value), } -impl IntoPy<PyObject> for Action { - fn into_py(self, py: Python<'_>) -> PyObject { +impl<'py> IntoPyObject<'py> for Action { + type Target = PyAny; + type Output = Bound<'py, Self::Target>; + type Error = PythonizeError; + + fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> { // When we pass the `Action` struct to Python we want it to be converted // to a dict. We use `pythonize`, which converts the struct using the // `serde` serialization. - pythonize(py, &self).expect("valid action") + pythonize(py, &self) } } @@ -270,13 +274,13 @@ pub enum SimpleJsonValue { } impl<'source> FromPyObject<'source> for SimpleJsonValue { - fn extract(ob: &'source PyAny) -> PyResult<Self> { + fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> { if let Ok(s) = ob.downcast::<PyString>() { Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string()))) // A bool *is* an int, ensure we try bool first. 
} else if let Ok(b) = ob.downcast::<PyBool>() { Ok(SimpleJsonValue::Bool(b.extract()?)) - } else if let Ok(i) = ob.downcast::<PyLong>() { + } else if let Ok(i) = ob.downcast::<PyInt>() { Ok(SimpleJsonValue::Int(i.extract()?)) } else if ob.is_none() { Ok(SimpleJsonValue::Null) @@ -298,15 +302,19 @@ pub enum JsonValue { } impl<'source> FromPyObject<'source> for JsonValue { - fn extract(ob: &'source PyAny) -> PyResult<Self> { + fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> { if let Ok(l) = ob.downcast::<PyList>() { - match l.iter().map(SimpleJsonValue::extract).collect() { + match l + .iter() + .map(|it| SimpleJsonValue::extract_bound(&it)) + .collect() + { Ok(a) => Ok(JsonValue::Array(a)), Err(e) => Err(PyTypeError::new_err(format!( "Can't convert to JsonValue::Array: {e}" ))), } - } else if let Ok(v) = SimpleJsonValue::extract(ob) { + } else if let Ok(v) = SimpleJsonValue::extract_bound(ob) { Ok(JsonValue::Value(v)) } else { Err(PyTypeError::new_err(format!( @@ -363,15 +371,19 @@ pub enum KnownCondition { }, } -impl IntoPy<PyObject> for Condition { - fn into_py(self, py: Python<'_>) -> PyObject { - pythonize(py, &self).expect("valid condition") +impl<'source> IntoPyObject<'source> for Condition { + type Target = PyAny; + type Output = Bound<'source, Self::Target>; + type Error = PythonizeError; + + fn into_pyobject(self, py: Python<'source>) -> Result<Self::Output, Self::Error> { + pythonize(py, &self) } } impl<'source> FromPyObject<'source> for Condition { fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult<Self> { - Ok(depythonize_bound(ob.clone())?) + Ok(depythonize(ob)?) } } @@ -534,6 +546,7 @@ pub struct FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, } #[pymethods] @@ -546,6 +559,7 @@ impl FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, ) -> Self { Self { push_rules, @@ -554,6 +568,7 @@ impl FilteredPushRules { msc3381_polls_enabled, msc3664_enabled, msc4028_push_encrypted_events, + msc4210_enabled, } } @@ -596,6 +611,14 @@ impl FilteredPushRules { return false; } + if self.msc4210_enabled + && (rule.rule_id == "global/override/.m.rule.contains_display_name" + || rule.rule_id == "global/content/.m.rule.contains_user_name" + || rule.rule_id == "global/override/.m.rule.roomnotif") + { + return false; + } + true }) .map(|r| { diff --git a/rust/src/push/utils.rs b/rust/src/push/utils.rs
index 28ebed62c8..59536c9954 100644 --- a/rust/src/push/utils.rs +++ b/rust/src/push/utils.rs
@@ -23,7 +23,6 @@ use anyhow::bail; use anyhow::Context; use anyhow::Error; use lazy_static::lazy_static; -use regex; use regex::Regex; use regex::RegexBuilder; diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs
index f69f45490f..3148e0f67a 100644 --- a/rust/src/rendezvous/mod.rs +++ b/rust/src/rendezvous/mod.rs
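Beyond the routine pyo3 renames, the hunk below hardens the rendezvous responses against misbehaving intermediaries: `Cache-Control` gains `no-transform`, and explicit `Content-Length` / `Content-Type` headers are set even on empty bodies, since (as the new comment notes) some reverse proxy and cache setups, Cloudflare among them, were observed stripping the `ETag` header when no `Content-Type` was present. A sketch of the resulting header set using the `headers` crate's typed API, with an illustrative response builder:

    use headers::{CacheControl, ContentLength, ContentType, HeaderMapExt};
    use http::{Response, StatusCode};

    fn empty_accepted() -> Response<&'static [u8]> {
        let mut response = Response::new(&b""[..]);
        *response.status_mut() = StatusCode::ACCEPTED;
        let headers = response.headers_mut();
        // no-store: never cache; no-transform: don't let proxies rewrite it.
        headers.typed_insert(CacheControl::new().with_no_store().with_no_transform());
        // Harmless on an empty body, but keeps ETag-stripping proxies honest.
        headers.typed_insert(ContentType::text());
        headers.typed_insert(ContentLength(0));
        response
    }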
@@ -29,7 +29,7 @@ use pyo3::{ exceptions::PyValueError, pyclass, pymethods, types::{PyAnyMethods, PyModule, PyModuleMethods}, - Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject, + Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python, }; use ulid::Ulid; @@ -37,6 +37,7 @@ use self::session::Session; use crate::{ errors::{NotFoundError, SynapseError}, http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt}, + UnwrapInfallible, }; mod session; @@ -46,7 +47,7 @@ fn prepare_headers(headers: &mut HeaderMap, session: &Session) { headers.typed_insert(AccessControlAllowOrigin::ANY); headers.typed_insert(AccessControlExposeHeaders::from_iter([ETAG])); headers.typed_insert(Pragma::no_cache()); - headers.typed_insert(CacheControl::new().with_no_store()); + headers.typed_insert(CacheControl::new().with_no_store().with_no_transform()); headers.typed_insert(session.etag()); headers.typed_insert(session.expires()); headers.typed_insert(session.last_modified()); @@ -125,7 +126,11 @@ impl RendezvousHandler { let base = Uri::try_from(format!("{base}_synapse/client/rendezvous")) .map_err(|_| PyValueError::new_err("Invalid base URI"))?; - let clock = homeserver.call_method0("get_clock")?.to_object(py); + let clock = homeserver + .call_method0("get_clock")? + .into_pyobject(py) + .unwrap_infallible() + .unbind(); // Construct a Python object so that we can get a reference to the // evict method and schedule it to run. @@ -187,10 +192,12 @@ impl RendezvousHandler { "url": uri, }) .to_string(); + let length = response.len() as _; let mut response = Response::new(response.as_bytes()); *response.status_mut() = StatusCode::CREATED; response.headers_mut().typed_insert(ContentType::json()); + response.headers_mut().typed_insert(ContentLength(length)); prepare_headers(response.headers_mut(), &session); http_response_to_twisted(twisted_request, response)?; @@ -288,6 +295,14 @@ impl RendezvousHandler { let mut response = Response::new(Bytes::new()); *response.status_mut() = StatusCode::ACCEPTED; prepare_headers(response.headers_mut(), session); + + // Even though this isn't mandated by the MSC, we set a Content-Type on the response. It + // doesn't do any harm as the body is empty, but this helps escape a bug in some reverse + // proxy/cache setup which strips the ETag header if there is no Content-Type set. + // Specifically, we noticed this behaviour when placing Synapse behind Cloudflare. + response.headers_mut().typed_insert(ContentType::text()); + response.headers_mut().typed_insert(ContentLength(0)); + http_response_to_twisted(twisted_request, response)?; Ok(()) @@ -304,6 +319,7 @@ impl RendezvousHandler { response .headers_mut() .typed_insert(AccessControlAllowOrigin::ANY); + response.headers_mut().typed_insert(ContentLength(0)); http_response_to_twisted(twisted_request, response)?; Ok(()) @@ -311,7 +327,7 @@ impl RendezvousHandler { } pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "rendezvous")?; + let child_module = PyModule::new(py, "rendezvous")?; child_module.add_class::<RendezvousHandler>()?; @@ -319,7 +335,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import rendezvous` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? 
.set_item("synapse.synapse_rust.rendezvous", child_module)?; diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml new file mode 100644
index 0000000000..641b316137 --- /dev/null +++ b/schema/synapse-config.schema.yaml
@@ -0,0 +1,5028 @@ +$schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json +$id: https://element-hq.github.io/synapse/schema/synapse/v1.132/synapse-config.schema.json +type: object +properties: + modules: + type: array + description: >- + Use the `module` sub-option to add modules under this option to extend + functionality. The `module` setting then has a sub-option, `config`, which + can be used to define some configuration for the `module`. + items: + type: object + properties: + module: + type: string + description: Path to the Python class of the module. + config: + type: object + description: Configuration options for the module. + default: [] + examples: + - - module: my_super_module.MySuperClass + config: + do_thing: true + - module: my_other_super_module.SomeClass + config: {} + server_name: + type: string + description: >- + This sets the public-facing domain of the server. + + + The `server_name` name will appear at the end of usernames and room + addresses created on your server. For example if the `server_name` was + example.com, usernames on your server would be in the format + `@user:example.com`. + + + In most cases you should avoid using a matrix specific subdomain such as + matrix.example.com or synapse.example.com as the `server_name` for the + same reasons you wouldn't use user@email.example.com as your email + address. See [here](../../delegate.md) for information on how to host + Synapse on a subdomain while preserving a clean `server_name`. + + + The `server_name` cannot be changed later so it is important to configure + this correctly before you start Synapse. It should be all lowercase and + may contain an explicit port. + examples: + - matrix.org + - localhost:8080 + pid_file: + type: ["string", "null"] + description: When running Synapse as a daemon, the file to store the pid in. + default: null + examples: + - DATADIR/homeserver.pid + daemonize: + type: boolean + description: >- + Specifies whether Synapse should be started as a daemon process. If + Synapse is being managed by [systemd](../../systemd-with-workers/), this + option must be omitted or set to `false`. + + + This can also be set by the `--daemonize` (`-D`) argument when starting + Synapse. + + + See `worker_daemonize` for more information on daemonizing workers. + default: false + examples: + - true + print_pidfile: + type: boolean + description: >- + Print the path to the pidfile just before daemonizing. + + + This can also be set by the `--print-pidfile` argument when starting Synapse. + default: false + examples: + - true + user_agent_suffix: + type: ["string", "null"] + description: >- + A suffix that is appended to the Synapse user-agent (ex. `Synapse/v1.123.0`). + default: null + examples: + - " (I'm a teapot; Linux x86_64)" + use_frozen_dicts: + type: boolean + description: >- + Determines whether we should freeze the internal dict object in + `FrozenEvent`. Freezing prevents bugs where we accidentally share e.g. + signature dicts. However, freezing a dict is expensive. + default: false + examples: + - true + web_client_location: + type: ["string", "null"] + description: The absolute URL to the web client which `/` will redirect to. + default: null + examples: + - "https://riot.example.com/" + public_baseurl: + type: ["string", "null"] + description: >- + The public-facing base URL that clients use to access this Homeserver (not + including _matrix/...). This is the same URL a user might enter into the + "Custom Homeserver URL" field on their client. 
If you use Synapse with a + reverse proxy, this should be the URL to reach Synapse via the proxy. + Otherwise, it should be the URL to reach Synapse's client HTTP listener + (see [`listeners`](#listeners) below). + + + If unset or null, `https://<server_name>/` is used. + default: null + examples: + - "https://example.com/" + serve_server_wellknown: + type: boolean + description: >- + By default, other servers will try to reach our server on port 8448, which + can be inconvenient in some environments. + + + Provided `https://<server_name>/` on port 443 is routed to Synapse, this + option configures Synapse to serve a file at + `https://<server_name>/.well-known/matrix/server`. This will tell other + servers to send traffic to port 443 instead. + + + This option currently defaults to false. + + + See [Delegation of incoming federation traffic](../../delegate.md) for + more information. + default: false + examples: + - true + extra_well_known_client_content: + type: object + description: >- + This option allows server runners to add arbitrary key-value pairs to the + [client-facing `.well-known` + response](https://spec.matrix.org/latest/client-server-api/#well-known-uri). + Note that the `public_baseurl` config option must be provided for Synapse + to serve a response to `/.well-known/matrix/client` at all. + + + If this option is provided, it parses the given yaml to json and serves it + on `/.well-known/matrix/client` endpoint alongside the standard + properties. + + + *Added in Synapse 1.62.0.* + examples: + - option1: value1 + option2: value2 + soft_file_limit: + type: integer + description: >- + Set the soft limit on the number of file descriptors synapse can use. Zero + is used to indicate synapse should set the soft limit to the hard limit. + default: 0 + examples: + - 3 + presence: + type: object + description: >- + Presence tracking allows users to see the state (e.g online/offline) of + other local and remote users. This option replaces the previous top-level + `use_presence` option. + properties: + enabled: + type: ["boolean", "string"] + description: >- + Set to false to disable presence tracking on this homeserver. + + + Can also be set to a special value of "untracked" which ignores + updates received via clients and federation, while still accepting + updates from the [module API](../../modules/index.md). + + + *The "untracked" option was added in Synapse 1.96.0.* + oneOf: + - type: boolean + - type: string + const: untracked + default: true + include_offline_users_on_sync: + type: boolean + description: >- + When clients perform an initial or `full_state` sync, presence results + for offline users are not included by default. Setting + `include_offline_users_on_sync` to `true` will always include offline + users in the results. + default: false + examples: + - enabled: false + include_offline_users_on_sync: false + require_auth_for_profile_requests: + type: boolean + description: >- + Whether to require authentication to retrieve profile data (avatars, + display names) of other users through the client API. Note that profile + data is also available via the federation API, unless + `allow_profile_lookup_over_federation` is set to false. + default: false + examples: + - true + limit_profile_requests_to_users_who_share_rooms: + type: boolean + description: >- + Use this option to require a user to share a room with another user in + order to retrieve their profile information. Only checked on Client-Server + requests. 
Profile requests from other servers should be checked by the + requesting server. + default: false + examples: + - true + include_profile_data_on_invite: + type: boolean + description: >- + Use this option to prevent a user's profile data from being retrieved and + displayed in a room until they have joined it. By default, a user's + profile data is included in an invite event, regardless of the values of + the above two settings, and whether or not the users share a server. + default: true + examples: + - false + allow_public_rooms_without_auth: + type: boolean + description: + If set to true, removes the need for authentication to access the server's + public rooms directory through the client API, meaning that anyone can + query the room directory. + default: false + examples: + - true + allow_public_rooms_over_federation: + type: boolean + description: >- + If set to true, allows any other homeserver to fetch the server's public + rooms directory via federation. + default: false + examples: + - true + default_room_version: + type: string + description: >- + The default room version for newly created rooms on this server. + + + Known room versions are listed + [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions) + + + For example, for room version 1, `default_room_version` should be set to + "1". + + + _Changed in Synapse 1.76:_ the default version room version was increased + from [9](https://spec.matrix.org/v1.5/rooms/v9/) to + [10](https://spec.matrix.org/v1.5/rooms/v10/). + default: "10" + examples: + - "8" + gc_thresholds: + type: ["array", "null"] + description: >- + The garbage collection threshold parameters to pass to `gc.set_threshold`, + if defined. + default: null + examples: + - - 700 + - 10 + - 10 + gc_min_interval: + type: array + description: >- + The minimum time in seconds between each GC for a generation, regardless + of the GC thresholds. This ensures that we don't do GC too frequently. A + value of `[1s, 10s, 30s]` indicates that a second must pass between + consecutive generation 0 GCs, etc. + default: + - 1s + - 10s + - 30s + examples: + - - 0.5s + - 30s + - 1m + filter_timeline_limit: + type: integer + description: >- + Set the limit on the returned events in the timeline in the get and sync + operations. A value of -1 means no upper limit. + default: 100 + examples: + - 5000 + block_non_admin_invites: + type: boolean + description: >- + Whether room invites to users on this server should be blocked (except + those sent by local server admins). + default: false + examples: + - true + enable_search: + type: boolean + description: >- + If set to false, new messages will not be indexed for searching and users + will receive errors when searching for messages. + default: true + examples: + - false + ip_range_blacklist: + type: array + description: >- + This option prevents outgoing requests from being sent to the specified + blacklisted IP address CIDR ranges. If this option is not specified then + it defaults to private IP address ranges (see the example below). + + + The blacklist applies to the outbound requests for federation, identity + servers, push servers, and for checking key validity for third-party + invite events. + + + (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly + listed here, since they correspond to unroutable addresses.) + + + This option replaces `federation_ip_range_blacklist` in Synapse v1.25.0. + + + Note: The value is ignored when an HTTP proxy is in use. 
+ items: + type: string + default: + - 127.0.0.0/8 + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + - 100.64.0.0/10 + - 192.0.0.0/24 + - 169.254.0.0/16 + - 192.88.99.0/24 + - 198.18.0.0/15 + - 192.0.2.0/24 + - 198.51.100.0/24 + - 203.0.113.0/24 + - 224.0.0.0/4 + - "::1/128" + - "fe80::/10" + - "fc00::/7" + - "2001:db8::/32" + - "ff00::/8" + - "fec0::/10" + ip_range_whitelist: + type: array + description: >- + List of IP address CIDR ranges that should be allowed for federation, + identity servers, push servers, and for checking key validity for + third-party invite events. This is useful for specifying exceptions to + wide-ranging blacklisted target IP ranges – e.g. for communication with a + push server only visible in your network. + + + This whitelist overrides `ip_range_blacklist`. + items: + type: string + default: [] + examples: + - - 192.168.1.1 + listeners: + type: array + description: >- + List of ports that Synapse should listen on, their purpose and their + configuration. + + + Valid resource names are: + + + * `client`: the client-server API (/_matrix/client). Also implies `media` + and `static`. If configuring the main process, the Synapse Admin API + (/_synapse/admin) is also implied. + + + * `consent`: user consent forms (/_matrix/consent). See + [here](../../consent_tracking.md) for more. + + + * `federation`: the server-server API (/_matrix/federation). Also implies + `media`, `keys`, `openid` + + + * `keys`: the key discovery API (/_matrix/key). + + + * `media`: the media API (/_matrix/media). + + + * `metrics`: the metrics interface. See [here](../../metrics-howto.md). + (Not compatible with Unix sockets) + + + * `openid`: OpenID authentication. See [here](../../openid.md). + + + * `replication`: the HTTP replication API (/_synapse/replication). See + [here](../../workers.md). + + + * `static`: static resources under synapse/static (/_matrix/static). + (Mostly useful for "fallback authentication".) + + + * `health`: the [health check + endpoint](../../reverse_proxy.md#health-check-endpoint). This endpoint is + by default active for all other resources and does not have to be + activated separately. This is only useful if you want to use the health + endpoint explicitly on a dedicated port or for [workers](../../workers.md) + and containers without listener e.g. [application + services](../../workers.md#notifying-application-services). + items: + type: object + properties: + port: + type: integer + description: The TCP port to bind to. + tag: + type: ["string", "null"] + description: >- + An alias for the port in the logger name. If set the tag is logged + instead of the port. Default to `None`, is optional and only valid + for listener with `type: http`. See the docs [request log + format](../administration/request_log.md). + bind_addresses: + type: ["array", "null"] + description: >- + A list of local addresses to listen on. The default is "all local + interfaces". + items: + type: string + type: + type: string + description: >- + The type of listener. Normally `http`, but other valid options are + [`manhole`](../../manhole.md) and + [`metrics`](../../metrics-howto.md). + enum: + - http + - manhole + - metrics + tls: + type: boolean + description: >- + Set to true to enable TLS for this listener. Will use the TLS + key/cert specified in tls_private_key_path/tls_certificate_path. + x_forwarded: + type: boolean + description: >- + Only valid for an `http` listener. Set to true to use the + X-Forwarded-For header as the client IP. 
Useful when Synapse is + behind a [reverse-proxy](../../reverse_proxy.md). + request_id_header: + type: ["string", "null"] + description: >- + The header extracted from each incoming request that is used as the + basis for the request ID. The request ID is used in + [logs](../administration/request_log.md#request-log-format) and + tracing to correlate and match up requests. When unset, Synapse will + automatically generate sequential request IDs. This option is useful + when Synapse is behind a [reverse-proxy](../../reverse_proxy.md). + + + _Added in Synapse 1.68.0._ + resources: + type: array + description: >- + Only valid for an `http` listener. A list of resources to host on this port. + items: + type: object + properties: + names: + type: array + description: >- + A list of names of HTTP resources. See below for a list of + valid resource names. + items: + type: string + enum: + - client + - consent + - federation + - keys + - media + - metrics + - openid + - replication + - static + - health + compress: + type: boolean + description: >- + Set to true to enable gzip compression on HTTP bodies for this + resource. This is currently only supported with the `client`, + `consent`, `metrics` and `federation` resources. + additional_resources: + type: object + description: >- + Only valid for an `http` listener. A map of additional endpoints + which should be loaded via dynamic modules. + additionalProperties: + type: object + properties: + module: + type: string + config: + type: object + path: + type: string + description: >- + A path and filename for a Unix socket. Make sure it is located in a + directory with read and write permissions, and that it already + exists (the directory will not be created). Defaults to `None`. + + * **Note**: The use of both `path` and `port` options for the same + `listener` is not compatible. + + * The `x_forwarded` option defaults to true when using Unix sockets + and can be omitted. + + * Other options that would not make sense to use with a UNIX socket, + such as `bind_addresses` and `tls` will be ignored and can be + removed. + + + _Added in Synapse 1.89.0_: Unix socket support + mode: + type: ["integer", "null"] + description: >- + The file permissions to set on the UNIX socket. Defaults to `666` if + unset or null. + + + **Note:** Must be set as `type: http` (does not support `metrics` + and `manhole`). Also make sure that `metrics` is not included in + `resources` -> `names` + + + _Added in Synapse 1.89.0_: Unix socket support + default: [] + examples: + - - port: 8448 + type: http + tls: true + resources: + - names: + - client + - federation + - - port: 8008 + tls: false + type: http + x_forwarded: true + bind_addresses: + - "::1" + - 127.0.0.1 + resources: + - names: + - client + - federation + compress: false + additional_resources: + /_matrix/my/custom/endpoint: + module: my_module.CustomRequestHandler + config: {} + - port: 9000 + bind_addresses: + - "::1" + - 127.0.0.1 + type: manhole + - - path: /run/synapse/main_public.sock + type: http + resources: + - names: + - client + - federation + manhole: + type: ["integer", "null"] + description: >- + Turn on the Twisted telnet manhole service on the given port. + + + This can also be set by the `--manhole` argument when starting Synapse. + default: null + examples: + - 1234 + manhole_settings: + type: object + description: >- + Connection settings for the manhole. You can find more information on the + manhole [here](../../manhole.md). 
+ properties:
+ username:
+ type: ["string", "null"]
+ description: The username for the manhole. This defaults to "matrix".
+ password:
+ type: ["string", "null"]
+ description: The password for the manhole. This defaults to "rabbithole".
+ ssh_priv_key_path:
+ type: ["string", "null"]
+ description: >-
+ The private SSH key used to encrypt the manhole traffic. If left
+ unset, then hardcoded and non-secret keys are used, which could allow
+ traffic to be intercepted if sent over a public network.
+ ssh_pub_key_path:
+ type: ["string", "null"]
+ description: >-
+ The public SSH key corresponding to `ssh_priv_key_path`. If left
+ unset, a hardcoded key is used.
+ examples:
+ - username: manhole
+ password: mypassword
+ ssh_priv_key_path: CONFDIR/id_rsa
+ ssh_pub_key_path: CONFDIR/id_rsa.pub
+ dummy_events_threshold:
+ type: integer
+ description: >-
+ Forward extremities can build up in a room due to networking delays
+ between homeservers. Once this happens in a large room, calculation of the
+ state of that room can become quite expensive. To mitigate this, once the
+ number of forward extremities reaches a given threshold, Synapse will send
+ an `org.matrix.dummy_event` event, which will reduce the forward
+ extremities in the room.
+
+
+ This setting defines the threshold (i.e. number of forward extremities in
+ the room) at which dummy events are sent.
+ default: 10
+ examples:
+ - 5
+ delete_stale_devices_after:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ An optional duration. If set, Synapse will run a daily background task to
+ log out and delete any device that hasn't been accessed for more than the
+ specified amount of time.
+
+
+ A value of null means devices are never pruned.
+
+
+ **Note:** This task will always run on the main process, regardless of the
+ value of `run_background_tasks_on`. This is due to workers currently not
+ having the ability to delete devices.
+ default: null
+ examples:
+ - 1y
+ max_event_delay_duration:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ The maximum allowed duration by which sent events can be delayed, as
+ per
+ [MSC4140](https://github.com/matrix-org/matrix-spec-proposals/pull/4140).
+ Must be a positive value if set.
+
+
+ If null or unset, sending of delayed events is disallowed.
+ default: null
+ examples:
+ - 24h
+ user_types:
+ type: object
+ description: >-
+ Configuration settings related to the user types feature.
+ properties:
+ default_user_type:
+ type: ["string", "null"]
+ description: "The default user type to use for registering new users when no value has been specified. Defaults to none."
+ default: null
+ extra_user_types:
+ type: array
+ description: "Array of additional user types to allow. These are treated as real users."
+ items:
+ type: string
+ default: []
+ examples:
+ - default_user_type: "custom"
+ extra_user_types: ["custom", "custom2"]
+ admin_contact:
+ type: ["string", "null"]
+ description: How to reach the server admin, used in `ResourceLimitError`.
+ default: null
+ examples:
+ - "mailto:admin@server.com"
+ hs_disabled:
+ type: boolean
+ description: >-
+ Blocks users from connecting to the homeserver and provides the
+ human-readable reason given in `hs_disabled_message`.
+ default: false
+ examples:
+ - true
+ hs_disabled_message:
+ type: string
+ description: Human-readable reason why the connection was blocked.
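+ # Illustrative sketch (not part of the schema): the `admin_contact`,
+ # `hs_disabled` and `hs_disabled_message` options above work together when a
+ # homeserver is taken out of service; the values here are placeholders:
+ #
+ #   admin_contact: "mailto:admin@example.com"
+ #   hs_disabled: true
+ #   hs_disabled_message: "This homeserver is down for maintenance."
+ #
+ # With a fragment like this, connecting users are blocked and receive the
+ # message together with the admin contact.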
+ default: Homeserver is currently blocked
+ examples:
+ - Reason for why the HS is blocked
+ limit_usage_by_mau:
+ type: boolean
+ description: >-
+ This option disables/enables monthly active user blocking. Used in cases
+ where the admin or server owner wants to limit the number of monthly
+ active users. When enabled and a limit is reached the server returns a
+ `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. If
+ this is enabled, a value for `max_mau_value` must also be set.
+
+
+ See [Monthly Active Users](../administration/monthly_active_users.md) for
+ details on how to configure MAU.
+ default: false
+ examples:
+ - true
+ max_mau_value:
+ type: integer
+ description: >-
+ This option sets the hard limit of monthly active users above which the
+ server will start blocking user actions if `limit_usage_by_mau` is
+ enabled.
+ default: 0
+ examples:
+ - 50
+ mau_trial_days:
+ type: integer
+ description: >-
+ The option `mau_trial_days` is a means to add a grace period for active
+ users. It means that users must be active for the specified number of days
+ before they are counted as active, and guards against the case where
+ lots of users sign up in a short space of time never to return after their
+ initial session.
+ default: 0
+ examples:
+ - 5
+ mau_appservice_trial_days:
+ type: object
+ description: >-
+ The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
+ applies a different trial number if the user was registered by an
+ appservice. A value of 0 means no trial days are applied. Appservices not
+ listed in this dictionary use the value of `mau_trial_days` instead.
+ additionalProperties:
+ type: integer
+ default: {}
+ examples:
+ - my_appservice_id: 3
+ another_appservice_id: 6
+ mau_limit_alerting:
+ type: boolean
+ description: >-
+ Limit client-side alerting should the mau limit be reached. This is useful
+ for small instances where the admin has 5 mau seats (say) for 5 specific
+ people and no interest in increasing the mau limit further.
+ default: true
+ examples:
+ - false
+ mau_stats_only:
+ type: boolean
+ description: >-
+ If enabled, the metrics for the number of monthly active users will be
+ populated, however no one will be limited based on these numbers. If
+ `limit_usage_by_mau` is true, this is implied to be true.
+ default: false
+ examples:
+ - true
+ server_context:
+ type: ["string", "null"]
+ description: >-
+ This option is used by phonehome stats to group together related servers.
+ default: null
+ examples:
+ - context
+ limit_remote_rooms:
+ type: object
+ description: >-
+ When this option is enabled, the room "complexity" will be checked before
+ a user joins a new remote room. If it is above the complexity limit, the
+ server will disallow joining, or will instantly leave the room. This is
+ useful for homeservers that are resource-constrained. Room complexity is
+ an arbitrary measure based on factors such as the number of users in the
+ room.
+ properties:
+ enabled:
+ type: boolean
+ description: Whether this check is enabled.
+ default: false
+ complexity:
+ type: number
+ description: The limit above which rooms cannot be joined.
+ default: 1.0
+ complexity_error:
+ type: string
+ description: >-
+ Override the error which is returned when the room is too complex with
+ a custom message.
+ default: >-
+ Your homeserver is unable to join rooms this large or complex. Please
+ speak to your server administrator, or upgrade your instance to join
+ this room.
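+ # Illustrative sketch (not part of the schema): a homeserver.yaml fragment
+ # combining the monthly-active-user options documented above, assuming a
+ # 50-seat server with a 3-day trial period (values are placeholders):
+ #
+ #   limit_usage_by_mau: true
+ #   max_mau_value: 50
+ #   mau_trial_days: 3
+ #
+ # Once 50 users have been active in the past month, further user actions
+ # fail with `Codes.RESOURCE_LIMIT_EXCEEDED` until usage drops again.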
+ admins_can_join:
+ type: boolean
+ description: Allow server admins to join complex rooms.
+ default: false
+ examples:
+ - enabled: true
+ complexity: 0.5
+ complexity_error: I can't let you do that, Dave.
+ admins_can_join: true
+ require_membership_for_aliases:
+ type: boolean
+ description: Whether to require a user to be in the room to add an alias to it.
+ default: true
+ examples:
+ - false
+ allow_per_room_profiles:
+ type: boolean
+ description: >-
+ Whether to allow per-room membership profiles through the sending of
+ membership events with profile information that differs from the target's
+ global profile.
+ default: true
+ examples:
+ - false
+ max_avatar_size:
+ oneOf:
+ - $ref: "#/$defs/bytes"
+ - type: "null"
+ description: >-
+ The largest permissible file size in bytes for a user avatar. Defaults to
+ no restriction. Use M for MB and K for KB.
+
+
+ Note that user avatar changes will not work if this is set without using
+ Synapse's media repository.
+ default: null
+ examples:
+ - 10M
+ allowed_avatar_mimetypes:
+ type: ["array", "null"]
+ description: >-
+ The MIME types allowed for user avatars. Defaults to no restriction.
+
+
+ Note that user avatar changes will not work if this is set without using
+ Synapse's media repository.
+ items:
+ type: string
+ default: null
+ examples:
+ - - image/png
+ - image/jpeg
+ - image/gif
+ redaction_retention_period:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ How long to keep redacted events in unredacted form in the database. After
+ this period redacted events get replaced with their redacted form in the
+ DB.
+
+
+ Synapse will check whether the retention period has concluded for
+ redacted events every 5 minutes. Thus, even if this option is set to `0`,
+ Synapse may still take up to 5 minutes to purge redacted events from the
+ database. Set to `null` to disable.
+ default: 7d
+ examples:
+ - 28d
+ forgotten_room_retention_period:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ How long to keep locally forgotten rooms before purging them from the DB.
+ A value of `null` means it's disabled.
+ default: null
+ examples:
+ - 28d
+ user_ips_max_age:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ How long to track users' last seen time and IPs in the database. Set to
+ `null` to disable clearing out of old rows.
+ default: 28d
+ examples:
+ - 14d
+ next_link_domain_whitelist:
+ type: ["array", "null"]
+ description: >-
+ A list of domains that the domain portion of `next_link` parameters must
+ match.
+
+
+ This parameter is optionally provided by clients while requesting
+ validation of an email or phone number, and maps to a link that users will
+ be automatically redirected to after validation succeeds. Clients can make
+ use of this parameter to aid the validation process.
+
+
+ The whitelist is applied whether the homeserver or an identity server is
+ handling validation.
+
+
+ The default value is no whitelist functionality; all domains are allowed.
+ Setting this value to an empty list will instead disallow all domains.
+ default: null
+ examples:
+ - matrix.org
+ templates:
+ type: object
+ description: >-
+ These options define templates to use when generating email or HTML page
+ contents.
+
+
+ See [here](../../templates.md) for more information about using custom
+ templates.
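+ # Illustrative sketch (not part of the schema): the retention housekeeping
+ # options documented above can be combined in homeserver.yaml; the values
+ # here are only an example:
+ #
+ #   redaction_retention_period: 28d
+ #   forgotten_room_retention_period: 28d
+ #   user_ips_max_age: 14d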
+ properties: + custom_template_directory: + type: ["string", "null"] + description: >- + Determines which directory Synapse will try to find template files in + to use to generate email or HTML page contents. If not set, or a file + is not found within the template directory, a default template from + within the Synapse package will be used. + default: null + examples: + - custom_template_directory: /path/to/custom/templates/ + retention: + type: object + description: >- + This option and the associated options determine message retention policy + at the server level. + + + Room admins and mods can define a retention period for their rooms using + the `m.room.retention` state event, and server admins can cap this period + by setting the `allowed_lifetime_min` and `allowed_lifetime_max` config + options. + + + If this feature is enabled, Synapse will regularly look for and purge + events which are older than the room's maximum retention period. Synapse + will also filter events received over federation so that events that + should have been purged are ignored and not stored again. + + + The message retention policies feature is disabled by default. You can + read more about this feature [here](../../message_retention_policies.md). + properties: + enabled: + type: boolean + description: Enforce message retention policies + default: false + default_policy: + type: object + description: >- + Default message retention policy. If set, Synapse will apply it to + rooms that lack the `m.room.retention` state event. + properties: + min_lifetime: + oneOf: + - $ref: "#/$defs/duration" + - type: "null" + description: >- + Minimum message retention time of the default message retention + policy. Synapse doesn't take this option into account yet. + default: null + max_lifetime: + oneOf: + - $ref: "#/$defs/duration" + - type: "null" + description: >- + Maximum message retention time of the default message retention policy. + default: null + allowed_lifetime_min: + oneOf: + - $ref: "#/$defs/duration" + - type: "null" + description: >- + Retention policy limit. If set, and the state of a room contains a + `m.room.retention` event in its state which contains a `min_lifetime` + that's beyond this bound, Synapse will cap the room's policy to these + limits when running purge jobs. + default: null + allowed_lifetime_max: + oneOf: + - $ref: "#/$defs/duration" + - type: "null" + description: >- + Retention policy limit. If set, and the state of a room contains a + `m.room.retention` event in its state which contains a `max_lifetime` + that's beyond this bound, Synapse will cap the room's policy to these + limits when running purge jobs. + default: null + purge_jobs: + type: ["array", "null"] + description: >- + Server admins can define the settings of the background jobs purging + the events whose lifetime has expired under the `purge_jobs` section. + + + If no configuration is provided for this option, a single job will be + set up to delete expired events in every room daily. + + + Each job's configuration defines which range of message lifetimes the + job takes care of. For example, if `shortest_max_lifetime` is "2d" and + `longest_max_lifetime` is "3d", the job will handle purging expired + events in rooms whose state defines a `max_lifetime` that's both + higher than 2 days, and lower than or equal to 3 days. Both the + minimum and the maximum value of a range are optional, e.g. 
a job with
+ no `shortest_max_lifetime` and a `longest_max_lifetime` of "3d" will
+ handle every room with a retention policy whose `max_lifetime` is
+ lower than or equal to three days.
+
+
+ The rationale for this per-job configuration is that some rooms might
+ have a retention policy with a low `max_lifetime`, where history needs
+ to be purged of outdated messages on a more frequent basis than for
+ the rest of the rooms (e.g. every 12h), but where you do not want that
+ purge to be performed by a job that iterates over every room it knows
+ of, which could be heavy on the server.
+
+
+ If any purge job is configured, it is strongly recommended to have at
+ least a single job with neither `shortest_max_lifetime` nor
+ `longest_max_lifetime` set, or one job without `shortest_max_lifetime`
+ and one job without `longest_max_lifetime` set. Otherwise some rooms
+ might be ignored, even if `allowed_lifetime_min` and
+ `allowed_lifetime_max` are set, because capping a room's policy to
+ these values is done after the policies are retrieved from Synapse's
+ database (which is done using the range specified in a purge job's
+ configuration).
+ items:
+ type: object
+ properties:
+ shortest_max_lifetime:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ Apply job to rooms that have a `max_lifetime` higher than
+ `shortest_max_lifetime`. A value of `null` never excludes any
+ room.
+ longest_max_lifetime:
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ description: >-
+ Apply job to rooms that have a `max_lifetime` lower than or
+ equal to `longest_max_lifetime`. A value of `null` never
+ excludes any room.
+ interval:
+ $ref: "#/$defs/duration"
+ description: How often to run the job.
+ default: null
+ examples:
+ - enabled: true
+ default_policy:
+ min_lifetime: 1d
+ max_lifetime: 1y
+ allowed_lifetime_min: 1d
+ allowed_lifetime_max: 1y
+ purge_jobs:
+ - longest_max_lifetime: 3d
+ interval: 12h
+ - shortest_max_lifetime: 3d
+ interval: 1d
+ tls_certificate_path:
+ type: ["string", "null"]
+ description: >-
+ This option specifies a PEM-encoded X509 certificate for TLS. This
+ certificate, as of Synapse 1.0, will need to be a valid and verifiable
+ certificate, signed by a recognised Certificate Authority.
+
+
+ Be sure to use a `.pem` file that includes the full certificate chain
+ including any intermediate certificates (for instance, if using certbot,
+ use `fullchain.pem` as your certificate, not `cert.pem`).
+ default: null
+ examples:
+ - CONFDIR/SERVERNAME.tls.crt
+ tls_private_key_path:
+ type: ["string", "null"]
+ description: PEM-encoded private key for TLS.
+ default: null
+ examples:
+ - CONFDIR/SERVERNAME.tls.key
+ federation_verify_certificates:
+ type: boolean
+ description: >-
+ Whether to verify TLS server certificates for outbound federation
+ requests. To disable certificate verification, set the option to false.
+ default: true
+ examples:
+ - false
+ federation_client_minimum_tls_version:
+ type: string
+ description: >-
+ The minimum TLS version that will be used for outbound federation
+ requests.
+
+
+ Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note that setting
+ this value higher than `"1.2"` will prevent federation with most of the
+ public Matrix network: only configure it to `"1.3"` if you have an
+ entirely private federation setup and you can ensure TLS 1.3 support.
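+ # Illustrative sketch (not part of the schema): a TLS configuration pulling
+ # together the options above, assuming a certbot-issued certificate; the
+ # paths are placeholders:
+ #
+ #   tls_certificate_path: /etc/letsencrypt/live/example.com/fullchain.pem
+ #   tls_private_key_path: /etc/letsencrypt/live/example.com/privkey.pem
+ #   federation_verify_certificates: true
+ #   federation_client_minimum_tls_version: "1.2"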
+ default: "1" + examples: + - "1.2" + federation_certificate_verification_whitelist: + type: array + description: >- + Skip federation certificate verification on a given whitelist of domains. + + + This setting should only be used in very specific cases, such as + federation over Tor hidden services and similar. For private networks of + homeservers, you likely want to use a private CA instead. + + + Only effective if `federation_verify_certificates` is `true`. + items: + type: string + default: [] + examples: + - - lon.example.com + - "*.domain.com" + - "*.onion" + federation_custom_ca_list: + type: array + description: >- + List of custom certificate authorities for federation traffic. + + + This setting should only normally be used within a private network of + homeservers. + + + Note that this list will replace those that are provided by your operating + environment. Certificates must be in PEM format. + items: + type: string + default: [] + examples: + - - myCA1.pem + - myCA2.pem + - myCA3.pem + federation_domain_whitelist: + type: array + description: >- + Restrict federation to the given whitelist of domains. N.B. we recommend + also firewalling your federation listener to limit inbound federation + traffic as early as possible, rather than relying purely on this + application-layer restriction. If not specified, the default is to + whitelist everything. + + + Note: this does not stop a server from joining rooms that servers not on + the whitelist are in. As such, this option is really only useful to + establish a "private federation", where a group of servers all whitelist + each other and have the same whitelist. + items: + type: string + default: [] + examples: + - - lon.example.com + - nyc.example.com + - syd.example.com + federation_whitelist_endpoint_enabled: + type: boolean + description: >- + Enables an endpoint for fetching the federation whitelist config. + + + The request method and path is `GET + /_synapse/client/v1/config/federation_whitelist`, and the response format + is: + + + ```json + + { + "whitelist_enabled": true, // Whether the federation whitelist is being enforced + "whitelist": [ // Which server names are allowed by the whitelist + "example.com" + ] + } + + ``` + + + If `whitelist_enabled` is `false` then the server is permitted to federate + with all others. + + + The endpoint requires authentication. + default: false + examples: + - true + federation_metrics_domains: + type: array + description: >- + Report prometheus metrics on the age of PDUs being sent to and received + from the given domains. This can be used to give an idea of "delay" on + inbound and outbound federation, though be aware that any delay can be due + to problems at either end or with the intermediate network. + items: + type: string + default: [] + examples: + - - matrix.org + - example.com + allow_profile_lookup_over_federation: + type: boolean + description: >- + Set to false to disable profile lookup over federation. By default, the + Federation API allows other homeservers to obtain profile data of any user + on this homeserver. + default: true + examples: + - false + allow_device_name_lookup_over_federation: + type: boolean + description: >- + Set this option to true to allow device display name lookup over + federation. By default, the Federation API prevents other homeservers from + obtaining the display names of any user devices on this homeserver. 
+ default: false
+ examples:
+ - true
+ federation:
+ type: object
+ description: >-
+ The federation section defines some sub-options related to federation.
+
+
+ The following options are related to configuring timeout and retry logic
+ for one request, independently of the others. The short retry algorithm is
+ used when a caller is waiting for the response to the request, while long
+ retry is used for requests that happen in the background, like sending a
+ federation transaction.
+
+
+ `destination_*` options control the retry logic when communicating with a
+ specific homeserver destination. Unlike the previous configuration
+ options, these values apply across all requests for a given destination
+ and the state of the backoff is stored in the database.
+ properties:
+ client_timeout:
+ $ref: "#/$defs/duration"
+ description: Timeout for the federation requests.
+ default: 60s
+ max_short_retry_delay:
+ $ref: "#/$defs/duration"
+ description: Maximum delay to be used for the short retry algo.
+ default: 2s
+ max_long_retry_delay:
+ $ref: "#/$defs/duration"
+ description: Maximum delay to be used for the long retry algo.
+ default: 60s
+ max_short_retries:
+ type: integer
+ description: Maximum number of retries for the short retry algo.
+ default: 3
+ max_long_retries:
+ type: integer
+ description: Maximum number of retries for the long retry algo.
+ default: 10
+ destination_min_retry_interval:
+ $ref: "#/$defs/duration"
+ description: "The initial backoff, after the first request fails."
+ default: 10m
+ destination_retry_multiplier:
+ type: integer
+ description: >-
+ How much we multiply the backoff by after each subsequent fail.
+ default: 2
+ destination_max_retry_interval:
+ $ref: "#/$defs/duration"
+ description: A cap on the backoff.
+ default: 1w
+ examples:
+ - client_timeout: 180s
+ max_short_retry_delay: 7s
+ max_long_retry_delay: 100s
+ max_short_retries: 5
+ max_long_retries: 20
+ destination_min_retry_interval: 30s
+ destination_retry_multiplier: 5
+ destination_max_retry_interval: 12h
+ event_cache_size:
+ $ref: "#/$defs/size"
+ description: >-
+ The number of events to cache in memory. Defaults to 10K. Like other
+ caches, this is affected by `caches.global_factor` (see below).
+
+
+ For example, the default is 10K and the global_factor default is 0.5.
+
+
+ Since 10K * 0.5 is 5K, the effective event cache size will be 5K.
+
+
+ The cache affected by this configuration is named "\*getEvent\*".
+
+
+ Note that this option is not part of the `caches` section.
+ default: 10K
+ examples:
+ - 15K
+ caches:
+ type: object
+ description: >-
+ A cache "factor" is a multiplier that can be applied to each of Synapse's
+ caches in order to increase or decrease the maximum number of entries that
+ can be stored.
+ io.element.post_description: >-
+ ### Reloading cache factors
+
+
+ The cache factors (i.e. `caches.global_factor` and
+ `caches.per_cache_factors`) may be reloaded at any time by sending a
+ [`SIGHUP`](https://en.wikipedia.org/wiki/SIGHUP) signal to Synapse
+ using e.g.
+
+
+ ```commandline
+
+ kill -HUP [PID_OF_SYNAPSE_PROCESS]
+
+ ```
+
+
+ If you are running multiple workers, you must individually update the
+ worker config file and send this signal to each worker process.
+
+
+ If you're using the [example systemd
+ service](https://github.com/element-hq/synapse/blob/develop/contrib/systemd/matrix-synapse.service)
+ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by
+ using `systemctl reload matrix-synapse`.
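+ # Illustrative sketch (not part of the schema): tuning cache factors via
+ # homeserver.yaml. Per the descriptions below, the same values could instead
+ # be supplied through the SYNAPSE_CACHE_FACTOR and
+ # SYNAPSE_CACHE_FACTOR_<CACHE_NAME> environment variables, which take
+ # priority over the config file:
+ #
+ #   caches:
+ #     global_factor: 1.0
+ #     per_cache_factors:
+ #       get_users_who_share_room_with_user: 2.0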
+ properties: + global_factor: + type: number + description: >- + Controls the global cache factor, which is the default cache factor + for all caches if a specific factor for that cache is not otherwise + set. + + + This can also be set by the `SYNAPSE_CACHE_FACTOR` environment + variable. Setting by environment variable takes priority over setting + through the config file. + + + Defaults to 0.5, which will halve the size of all caches. + + + Note that changing this value also affects the HTTP connection pool. + default: 0.5 + per_cache_factors: + type: object + description: >- + A dictionary of cache name to cache factor for that individual cache. + Overrides the global cache factor for a given cache. + + + These can also be set through environment variables comprised of + `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital letters and + underscores. Setting by environment variable takes priority over + setting through the config file. Ex. + `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0` + + + Some caches have '*' and other characters that are not alphanumeric or + underscores. These caches can be named with or without the special + characters stripped. For example, to specify the cache factor for + `*stateGroupCache*` via an environment variable would be + `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. + additionalProperties: + type: number + default: {} + expire_caches: + type: boolean + description: >- + Controls whether cache entries are evicted after a specified time + period. Set to false to disable this feature. Note that never expiring + caches may result in excessive memory usage. + default: true + cache_entry_ttl: + $ref: "#/$defs/duration" + description: >- + If `expire_caches` is enabled, this flag controls how long an entry + can be in a cache without having been accessed before being evicted. + default: 30m + sync_response_cache_duration: + $ref: "#/$defs/duration" + description: >- + Controls how long the results of a /sync request are cached for after + a successful response is returned. A higher duration can help clients + with intermittent connections, at the cost of higher memory usage. A + value of zero means that sync responses are not cached. + + + *Changed in Synapse 1.62.0*: The default was changed from 0 to 2m. + default: 2m + cache_autotuning: + type: object + description: >- + `cache_autotuning` and its sub-options `max_cache_memory_usage`, + `target_cache_memory_usage`, and `min_cache_ttl` work in conjunction + with each other to maintain a balance between cache memory usage and + cache entry availability. You must be using + [jemalloc](../administration/admin_faq.md#help-synapse-is-slow-and-eats-all-my-ramcpu) + to utilize this option, and all three of the options must be specified + for this feature to work. This option defaults to off, enable it by + providing values for the sub-options listed below. Please note that + the feature will not work and may cause unstable behavior (such as + excessive emptying of caches or exceptions) if all of the values are + not provided. Please see the [Config Conventions](#config-conventions) + for information on how to specify memory size and cache expiry + durations. + properties: + max_cache_memory_usage: + oneOf: + - $ref: "#/$defs/duration" + - type: "null" + description: >- + Sets a ceiling on how much memory the cache can use before caches + begin to be continuously evicted. 
They will continue to be evicted + until the memory usage drops below the + `target_cache_memory_usage`, set in the setting below, or until + the `min_cache_ttl` is hit. + default: null + target_cache_memory_usage: + oneOf: + - $ref: "#/$defs/bytes" + - type: "null" + description: Sets a rough target for the desired memory usage of the caches. + default: null + min_cache_ttl: + oneOf: + - $ref: "#/$defs/duration" + - type: "null" + description: >- + Sets a limit under which newer cache entries are not evicted and + is only applied when caches are actively being + evicted/`max_cache_memory_usage` has been exceeded. This is to + protect hot caches from being emptied while Synapse is evicting + due to memory. + default: null + examples: + - global_factor: 1.0 + per_cache_factors: + get_users_who_share_room_with_user: 2.0 + sync_response_cache_duration: 2m + cache_autotuning: + max_cache_memory_usage: 1024M + target_cache_memory_usage: 758M + min_cache_ttl: 5m + database: + $ref: "#/$defs/database" + examples: + - name: sqlite3 + args: + database: /path/to/homeserver.db + - name: psycopg2 + txn_limit: 10000 + args: + user: synapse_user + password: secretpassword + dbname: synapse + host: localhost + port: 5432 + cp_min: 5 + cp_max: 10 + databases: + type: object + description: >- + The `databases` option allows specifying a mapping between certain + database tables and database host details, spreading the load of a single + Synapse instance across multiple database backends. This is often referred + to as "database sharding". This option is only supported for PostgreSQL + database backends. + + + **Important note:** This is a supported option, but is not currently used + in production by the Matrix.org Foundation. Proceed with caution and + always make backups. + + + `databases` is a dictionary of arbitrarily-named database entries. Each + entry is equivalent to the value of the `database` homeserver config + option (see above), with the addition of a `data_stores` key. + `data_stores` is an array of strings that specifies the data store(s) (a + defined label for a set of tables) that should be stored on the associated + database backend entry. + + + The currently defined values for `data_stores` are: + + + * `"state"`: Database that relates to state groups will be stored in this + database. + + Specifically, that means the following tables: + * `state_groups` + * `state_group_edges` + * `state_groups_state` + + And the following sequences: + * `state_groups_seq_id` + + * `"main"`: All other database tables and sequences. + + + All databases will end up with additional tables used for tracking + database schema migrations and any pending background updates. Synapse + will create these automatically on startup when checking for and/or + performing database schema migrations. + + + To migrate an existing database configuration (e.g. all tables on a single + database) to a different configuration (e.g. the "main" data store on one + database, and "state" on another), do the following: + + + 1. Take a backup of your existing database. Things can and do go wrong and + database corruption is no joke! + + 2. Ensure all pending database migrations have been applied and background + updates have run. The simplest way to do this is to use the + `update_synapse_database` script supplied with your Synapse installation. + + ```sh + update_synapse_database --database-config homeserver.yaml --run-background-updates + ``` + + 3. Copy over the necessary tables and sequences from one database to the + other. 
Tables relating to database migrations, schemas, schema versions
+ and background updates should **not** be copied.
+
+ As an example, say that you'd like to split out the "state" data store from an existing database which currently contains all data stores.
+
+ Simply copy the tables and sequences defined above for the "state" datastore from the existing database to the secondary database. As noted above, additional tables will be created in the secondary database when Synapse is started.
+
+ 4. Modify/create the `databases` option in your `homeserver.yaml` to match
+ the desired database configuration.
+
+ 5. Start Synapse. Check that it starts up successfully and that things
+ generally seem to be working.
+
+ 6. Drop the old tables that were copied in step 3.
+
+
+ Only one of the options `database` or `databases` may be specified in your
+ config, but not both.
+ additionalProperties:
+ $ref: "#/$defs/database"
+ properties:
+ data_stores:
+ type: array
+ items:
+ type: string
+ enum:
+ - state
+ - main
+ default: {}
+ examples:
+ - basement_box:
+ name: psycopg2
+ txn_limit: 10000
+ data_stores:
+ - main
+ args:
+ user: synapse_user
+ password: secretpassword
+ dbname: synapse_main
+ host: localhost
+ port: 5432
+ cp_min: 5
+ cp_max: 10
+ my_other_database:
+ name: psycopg2
+ txn_limit: 10000
+ data_stores:
+ - state
+ args:
+ user: synapse_user
+ password: secretpassword
+ dbname: synapse_state
+ host: localhost
+ port: 5432
+ cp_min: 5
+ cp_max: 10
+ log_config:
+ type: ["string", "null"]
+ description: >-
+ This option specifies a YAML Python logging config file as described
+ [here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
+ default: null
+ examples:
+ - CONFDIR/SERVERNAME.log.config
+ rc_message:
+ $ref: "#/$defs/rc"
+ description: >-
+ Ratelimiting settings for client messaging.
+
+
+ This is a ratelimiting option for messages that ratelimits sending based
+ on the account the client is using.
+ default:
+ per_second: 0.2
+ burst_count: 10.0
+ examples:
+ - per_second: 0.5
+ burst_count: 15.0
+ rc_registration:
+ $ref: "#/$defs/rc"
+ description: >-
+ This option ratelimits registration requests based on the client's IP address.
+ default:
+ per_second: 0.17
+ burst_count: 3.0
+ examples:
+ - per_second: 0.15
+ burst_count: 2.0
+ rc_registration_token_validity:
+ $ref: "#/$defs/rc"
+ description: >-
+ This option ratelimits requests to check the validity of registration
+ tokens, based on the client's IP address.
+ default:
+ per_second: 0.1
+ burst_count: 5.0
+ examples:
+ - per_second: 0.3
+ burst_count: 6.0
+ rc_login:
+ type: object
+ description: This option specifies several limits for login.
+ properties:
+ address:
+ $ref: "#/$defs/rc"
+ description: Ratelimits login requests based on the client's IP address.
+ default:
+ per_second: 0.003
+ burst_count: 5.0
+ account:
+ $ref: "#/$defs/rc"
+ description: >-
+ Ratelimits login requests based on the account the client is
+ attempting to log into.
+ default:
+ per_second: 0.003
+ burst_count: 5.0
+ failed_attempts:
+ $ref: "#/$defs/rc"
+ description: >-
+ Ratelimits login requests based on the account the client is
+ attempting to log into, based on the number of failed login attempts
+ for this account.
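+ # Worked example (illustrative): with rc_message left at its defaults of
+ # per_second 0.2 and burst_count 10.0, a client may send a burst of 10
+ # messages at once, after which it is limited to one message every
+ # 5 seconds (1 / 0.2) while the burst allowance refills. The same
+ # arithmetic applies to the other rc_* options in this file.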
+ default:
+ per_second: 0.17
+ burst_count: 3.0
+ examples:
+ - address:
+ per_second: 0.15
+ burst_count: 5.0
+ account:
+ per_second: 0.18
+ burst_count: 4.0
+ failed_attempts:
+ per_second: 0.19
+ burst_count: 7.0
+ rc_admin_redaction:
+ $ref: "#/$defs/rc"
+ description: >-
+ This option sets ratelimits on redactions by room admins. If this is not
+ explicitly set then it uses the same ratelimiting as per `rc_message`.
+ This is useful to allow room admins to deal with abuse quickly.
+ examples:
+ - per_second: 1.0
+ burst_count: 50.0
+ rc_joins:
+ type: object
+ description: This option allows for ratelimiting the number of rooms a user can join.
+ properties:
+ local:
+ $ref: "#/$defs/rc"
+ description: Ratelimits when users are joining rooms the server is already in.
+ default:
+ per_second: 0.1
+ burst_count: 10.0
+ remote:
+ $ref: "#/$defs/rc"
+ description: >-
+ Ratelimits when users are trying to join rooms not on the server
+ (which can be more computationally expensive than restricting
+ locally).
+ default:
+ per_second: 0.01
+ burst_count: 10.0
+ examples:
+ - local:
+ per_second: 0.2
+ burst_count: 15.0
+ remote:
+ per_second: 0.03
+ burst_count: 12.0
+ rc_joins_per_room:
+ $ref: "#/$defs/rc"
+ description: >-
+ This option allows admins to ratelimit joins to a room based on the number
+ of recent joins (local or remote) to that room. It is intended to mitigate
+ mass-join spam waves which target multiple homeservers.
+
+
+ _Added in Synapse 1.64.0._
+ default:
+ per_second: 1.0
+ burst_count: 10.0
+ examples:
+ - per_second: 1.0
+ burst_count: 10.0
+ rc_3pid_validation:
+ $ref: "#/$defs/rc"
+ description: >-
+ This option ratelimits how often a user or IP can attempt to validate a 3PID.
+ default:
+ per_second: 0.003
+ burst_count: 5.0
+ examples:
+ - per_second: 0.003
+ burst_count: 5.0
+ rc_invites:
+ type: object
+ description: >-
+ This option sets ratelimits on how often invites can be sent in a room or
+ to a specific user.
+
+
+ Client requests that invite user(s) when [creating a
+ room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
+ will count against the `rc_invites.per_room` limit, whereas client
+ requests to [invite a single user to a
+ room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidinvite)
+ will count against both the `rc_invites.per_user` and
+ `rc_invites.per_room` limits.
+
+
+ Federation requests to invite a user will count against the
+ `rc_invites.per_user` limit only, as Synapse presumes ratelimiting by room
+ will be done by the sending server.
+
+
+ _Changed in version 1.63:_ added the `per_issuer` limit.
+ properties:
+ per_room:
+ $ref: "#/$defs/rc"
+ description: Applies to the room of the invitation.
+ default:
+ per_second: 0.3
+ burst_count: 10.0
+ per_user:
+ $ref: "#/$defs/rc"
+ description: >-
+ Applies to the *receiver* of the invite, rather than the sender,
+ meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a
+ single user cannot *receive* more than a burst of 5 invites at a
+ time.
+ default:
+ per_second: 0.003
+ burst_count: 5.0
+ per_issuer:
+ $ref: "#/$defs/rc"
+ description: >-
+ Applies to the *issuer* of the invite, meaning that a
+ `rc_invite.per_issuer.burst_count` of 5 mandates that a single user
+ cannot *send* more than a burst of 5 invites at a time.
+ default: + per_second: 0.3 + burst_count: 10.0 + examples: + - per_room: + per_second: 0.5 + burst_count: 5.0 + per_user: + per_second: 0.004 + burst_count: 3.0 + per_issuer: + per_second: 0.5 + burst_count: 5.0 + rc_third_party_invite: + $ref: "#/$defs/rc" + description: >- + This option ratelimits 3PID invites (i.e. invites sent to a third-party ID + such as an email address or a phone number) based on the account that's + sending the invite. + default: + per_second: 0.2 + burst_count: 10.0 + rc_media_create: + $ref: "#/$defs/rc" + description: >- + This option ratelimits creation of MXC URIs via the + `/_matrix/media/v1/create` endpoint based on the account that's creating + the media. + default: + per_second: 10.0 + burst_count: 50.0 + rc_federation: + type: object + description: Defines limits on federation requests. + properties: + window_size: + type: integer + description: Window size in milliseconds. + default: 1000 + sleep_limit: + type: integer + description: >- + Number of federation requests from a single server in a window before + the server will delay processing the request. + default: 10 + sleep_delay: + type: integer + description: >- + Duration in milliseconds to delay processing events from remote + servers by if they go over the sleep limit. + default: 500 + reject_limit: + type: integer + description: >- + Maximum number of concurrent federation requests allowed from a single server. + default: 50 + concurrent: + type: integer + description: >- + Number of federation requests to concurrently process from a single server. + default: 3 + examples: + - window_size: 750 + sleep_limit: 15 + sleep_delay: 400 + reject_limit: 40 + concurrent: 5 + rc_presence: + type: object + description: This option sets ratelimiting for presence. + properties: + per_user: + $ref: "#/$defs/rc" + description: >- + Sets rate limits on how often a specific users' presence updates are + evaluated. Ratelimited presence updates sent via sync are ignored, and + no error is returned to the client. This option also sets the rate + limit for the [`PUT /_matrix/client/v3/presence/{userId}/status`] + endpoint. + + + [`PUT /_matrix/client/v3/presence/{userId}/status`]: + <https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3presenceuseridstatus> + default: + per_user: + per_second: 0.1 + burst_count: 1.0 + examples: + - per_user: + per_second: 0.05 + burst_count: 1.0 + rc_delayed_event_mgmt: + $ref: "#/$defs/rc" + description: >- + Ratelimiting settings for delayed event management. + + + This is a ratelimiting option that ratelimits attempts to restart, cancel, + or view delayed events based on the sending client's account and device + ID. + + + Attempts to create or send delayed events are ratelimited not by this + setting, but by `rc_message`. + + + Setting this to a high value allows clients to make delayed event + management requests often (such as repeatedly restarting a delayed event + with a short timeout, or restarting several different delayed events all + at once) without the risk of being ratelimited. + default: + per_user: + per_second: 1.0 + burst_count: 5.0 + examples: + - per_second: 2.0 + burst_count: 20.0 + federation_rr_transactions_per_room_per_second: + type: integer + description: >- + Sets outgoing federation transaction frequency for sending read-receipts, + per-room. + + + If we end up trying to send out more read-receipts, they will get buffered + up into fewer transactions. 
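+ # Worked example (illustrative): with the rc_federation defaults above, once
+ # a single remote server has made more than 10 requests (sleep_limit) within
+ # a 1000 ms window (window_size), each further request is delayed by 500 ms
+ # (sleep_delay); requests beyond 50 concurrent ones (reject_limit) are
+ # rejected outright, and at most 3 (concurrent) are processed at a time.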
+ default: 50
+ examples:
+ - 40
+ enable_authenticated_media:
+ type: boolean
+ description: >-
+ When set to true, all subsequent media uploads will be marked as
+ authenticated, and will not be available over legacy unauthenticated media
+ endpoints (`/_matrix/media/(r0|v3|v1)/download` and
+ `/_matrix/media/(r0|v3|v1)/thumbnail`) – requests for authenticated media
+ over these endpoints will result in a 404. All media, including
+ authenticated media, will be available over the authenticated media
+ endpoints `_matrix/client/v1/media/download` and
+ `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this
+ option to true will still be available over the legacy endpoints. Note
+ that if the setting is switched to false after enabling, media marked as
+ authenticated will be available over legacy endpoints. Defaults to true
+ (previously false). In a future release of Synapse, this option will be
+ removed and become always-on.
+
+
+ In all cases, authenticated requests to download media will succeed, but
+ for unauthenticated requests, this case-by-case breakdown describes
+ whether media downloads are permitted:
+
+
+ * `enable_authenticated_media = False`:
+ * unauthenticated client or homeserver requesting local media: allowed
+ * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache, or as long as the remote homeserver does not require authentication to retrieve the media
+ * `enable_authenticated_media = True`:
+ * unauthenticated client or homeserver requesting local media: allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.
+ * unauthenticated client or homeserver requesting remote media: the same as for local media; allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); otherwise denied.
+
+ It is especially notable that media downloaded before this option existed
+ (in older Synapse versions), or whilst this option was set to `False`,
+ will perpetually be available over the legacy, unauthenticated endpoint,
+ even after this option is set to `True`. This is for backwards
+ compatibility with older clients and homeservers that do not yet support
+ requesting authenticated media; those older clients or homeservers will
+ not be cut off from media they can already see.
+
+
+ _Changed in Synapse 1.120:_ This option now defaults to `True` when not
+ set, whereas before this version it defaulted to `False`.
+ default: true
+ examples:
+ - false
+ enable_media_repo:
+ type: boolean
+ description: >-
+ Enable the media store service in the Synapse master. Set to false if you
+ are using a separate media store worker.
+ default: true
+ examples:
+ - false
+ media_store_path:
+ type: string
+ description: Directory where uploaded images and attachments are stored.
+ default: media_store
+ examples:
+ - DATADIR/media_store
+ max_pending_media_uploads:
+ type: integer
+ description: >-
+ How many *pending media uploads* can a given user have? A pending media
+ upload is a created MXC URI that (a) is not expired (the
+ `unused_expires_at` timestamp has not passed) and (b) for which no media
+ has yet been uploaded.
+ default: 5
+ examples:
+ - 5
+ unused_expiration_time:
+ $ref: "#/$defs/duration"
+ description: How long to wait in milliseconds before expiring created media IDs.
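+ # Illustrative sketch (not part of the schema): the upload-related options
+ # above combined in homeserver.yaml; the values are only an example:
+ #
+ #   enable_authenticated_media: true
+ #   max_pending_media_uploads: 5
+ #   unused_expiration_time: 1h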
+ default: 24h
+ examples:
+ - 1h
+ media_storage_providers:
+ type: array
+ description: >-
+ Media storage providers allow media to be stored in different locations.
+ items:
+ type: object
+ properties:
+ module:
+ type: string
+ description: "Type of resource, e.g. `file_system`."
+ store_local:
+ type: boolean
+ description: Whether to store newly uploaded local files.
+ store_remote:
+ type: boolean
+ description: Whether to store newly downloaded remote files.
+ store_synchronous:
+ type: boolean
+ description: Whether to wait for successful storage for local uploads.
+ config:
+ type: object
+ description: Sets a path to the resource through the `directory` option.
+ properties:
+ directory:
+ type: string
+ description: Path to the resource.
+ default: []
+ examples:
+ - - module: file_system
+ store_local: false
+ store_remote: false
+ store_synchronous: false
+ config:
+ directory: /mnt/some/other/directory
+ max_upload_size:
+ $ref: "#/$defs/bytes"
+ description: >-
+ The largest allowed upload size in bytes.
+
+
+ If you are using a reverse proxy you may also need to set this value in
+ your reverse proxy's config. Notably Nginx has a small max body size by
+ default. See [here](../../reverse_proxy.md) for more on using a reverse
+ proxy with Synapse.
+ default: 50M
+ examples:
+ - 60M
+ max_image_pixels:
+ $ref: "#/$defs/bytes"
+ description: Maximum number of pixels that will be thumbnailed.
+ default: 32M
+ examples:
+ - 35M
+ remote_media_download_burst_count:
+ $ref: "#/$defs/bytes"
+ description: >-
+ Remote media downloads are ratelimited using a [leaky bucket
+ algorithm](https://en.wikipedia.org/wiki/Leaky_bucket), where a given
+ "bucket" is keyed to the IP address of the requester when requesting
+ remote media downloads. This configuration option sets the size of the
+ bucket against which the size in bytes of downloads are penalized – if the
+ bucket is full, i.e. a given number of bytes have already been downloaded,
+ further downloads will be denied until the bucket drains. See also
+ `remote_media_download_per_second` which determines the rate at which the
+ "bucket" is emptied and thus has available space to authorize new
+ requests.
+ default: 500MiB
+ examples:
+ - 200M
+ remote_media_download_per_second:
+ $ref: "#/$defs/bytes"
+ description: >-
+ Works in conjunction with `remote_media_download_burst_count` to ratelimit
+ remote media downloads – this configuration option determines the rate at
+ which the "bucket" (see above) leaks in bytes per second. As requests are
+ made to download remote media, the size of those requests in bytes is
+ added to the bucket, and once the bucket has reached its capacity, no
+ more requests will be allowed until a number of bytes has "drained" from
+ the bucket. This setting determines the rate at which bytes drain from the
+ bucket, with the practical effect that the larger the number, the faster
+ the bucket leaks, allowing for more bytes downloaded over a shorter period
+ of time. Defaults to 87KiB per second. See also
+ `remote_media_download_burst_count`.
+ default: 87KiB
+ examples:
+ - 40K
+ prevent_media_downloads_from:
+ type: array
+ description: >-
+ A list of domains to never download media from. Media from these domains
+ that is already downloaded will not be deleted, but will be inaccessible
+ to users. This option does not affect admin APIs trying to
+ download/operate on media.
+
+
+ This will not prevent the listed domains from accessing media themselves.
It simply prevents users on this server from downloading media originating
+ from the listed servers.
+
+
+ This will have no effect on media originating from the local server. This
+ only affects media downloaded from other Matrix servers; to control URL
+ previews see
+ [`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
+ [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+ items:
+ type: string
+ default: []
+ examples:
+ - - evil.example.org
+ - evil2.example.org
+ dynamic_thumbnails:
+ type: boolean
+ description: >-
+ Whether to generate new thumbnails on the fly to precisely match the
+ resolution requested by the client. If true then whenever a new resolution
+ is requested by the client the server will generate a new thumbnail. If
+ false the server will pick a thumbnail from a precalculated list.
+ default: false
+ examples:
+ - true
+ thumbnail_sizes:
+ type: array
+ description: List of thumbnails to precalculate when an image is uploaded.
+ items:
+ type: object
+ properties:
+ width:
+ type: integer
+ description: Width of the generated thumbnail.
+ height:
+ type: integer
+ description: Height of the generated thumbnail.
+ method:
+ type: string
+ enum:
+ - crop
+ - scale
+ description: >-
+ Method to fit the thumbnail dimensions. Current options are `crop`
+ and `scale`.
+ default:
+ - width: 32
+ height: 32
+ method: crop
+ - width: 96
+ height: 96
+ method: crop
+ - width: 320
+ height: 240
+ method: scale
+ - width: 640
+ height: 480
+ method: scale
+ - width: 800
+ height: 600
+ method: scale
+ media_retention:
+ type: object
+ description: >-
+ Controls whether local media and entries in the remote media cache (media
+ that is downloaded from other homeservers) should be removed under certain
+ conditions, typically for the purpose of saving space.
+
+
+ Purging media files will be carried out by the media worker (that is,
+ the worker that has the `enable_media_repo` homeserver config option set
+ to `true`). This may be the main process.
+
+
+ The `media_retention.local_media_lifetime` and
+ `media_retention.remote_media_lifetime` config options control whether
+ media will be purged if it has not been accessed in a given amount of
+ time. Note that media is "accessed" when loaded in a room in a client, or
+ otherwise downloaded by a local or remote user. If the media has never
+ been accessed, the media's creation time is used instead. Both thumbnails
+ and the original media will be removed. If either of these options are
+ unset, then media of that type will not be purged.
+
+
+ Local or cached remote media that has been
+ [quarantined](../../admin_api/media_admin_api.md#quarantining-media-in-a-room)
+ will not be deleted. Similarly, local media that has been marked as
+ [protected from
+ quarantine](../../admin_api/media_admin_api.md#protecting-media-from-being-quarantined)
+ will not be deleted.
+ properties:
+ local_media_lifetime:
+ description: >-
+ Duration without access to a local media resource after which it will
+ be purged. If the media has never been accessed, the media's creation
+ time is used instead. Both thumbnails and the original media will be
+ removed. If unset or null, local media will not be purged.
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ default: null
+ remote_media_lifetime:
+ description: >-
+ Duration without access to a remote media resource after which it will
+ be purged. If the media has never been accessed, the media's creation
+ time is used instead.
Both thumbnails and the original media will be
+ removed. If unset or null, remote media will not be purged.
+ oneOf:
+ - $ref: "#/$defs/duration"
+ - type: "null"
+ default: null
+ examples:
+ - local_media_lifetime: 90d
+ remote_media_lifetime: 14d
+ url_preview_enabled:
+ type: boolean
+ description: >-
+ This setting determines whether the preview URL API is enabled. Set to
+ true to enable. If enabled you must specify a
+ `url_preview_ip_range_blacklist` blacklist.
+ default: false
+ examples:
+ - true
+ url_preview_ip_range_blacklist:
+ type: ["array", "null"]
+ description: >-
+ List of IP address CIDR ranges that the URL preview spider is denied from
+ accessing. There are no defaults: you must explicitly specify a list for
+ URL previewing to work. You should specify any internal services in your
+ network that you do not want Synapse to try to connect to, otherwise
+ anyone in any Matrix room could cause your Synapse to issue arbitrary GET
+ requests to your internal services, causing serious security issues.
+
+
+ (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
+ listed here, since they correspond to unroutable addresses.)
+
+
+ This must be specified if `url_preview_enabled` is set. It is recommended
+ that you use the following example list as a starting point.
+
+
+ Note: The value is ignored when an HTTP proxy is in use.
+ items:
+ type: string
+ default: null
+ examples:
+ - - 127.0.0.0/8
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ - 100.64.0.0/10
+ - 192.0.0.0/24
+ - 169.254.0.0/16
+ - 192.88.99.0/24
+ - 198.18.0.0/15
+ - 192.0.2.0/24
+ - 198.51.100.0/24
+ - 203.0.113.0/24
+ - 224.0.0.0/4
+ - "::1/128"
+ - "fe80::/10"
+ - "fc00::/7"
+ - "2001:db8::/32"
+ - "ff00::/8"
+ - "fec0::/10"
+ url_preview_ip_range_whitelist:
+ type: array
+ description: >-
+ This option sets a list of IP address CIDR ranges that the URL preview
+ spider is allowed to access even if they are specified in
+ `url_preview_ip_range_blacklist`. This is useful for specifying exceptions
+ to wide-ranging blacklisted target IP ranges – e.g. for enabling URL
+ previews for a specific private website only visible in your network.
+ items:
+ type: string
+ default: []
+ examples:
+ - - 192.168.1.1
+ url_preview_url_blacklist:
+ type: array
+ description: >-
+ Optional list of URL matches that the URL preview spider is denied from
+ accessing. This is a usability feature, not a security one. You should use
+ `url_preview_ip_range_blacklist` in preference to this, otherwise someone
+ could define a public DNS entry that points to a private IP address and
+ circumvent the blacklist. Applications that perform redirects or serve
+ different content when detecting that Synapse is accessing them can also
+ bypass the blacklist. This is most useful when you know there is an entire
+ shape of URL that you do not want Synapse to preview.
+
+
+ Each list entry is a dictionary of URL component attributes as returned by
+ `urlparse.urlsplit` as applied to the absolute form of the URL. See
+ [here](https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit)
+ for more information. Some examples are:
+
+
+ * `username`
+
+ * `netloc`
+
+ * `scheme`
+
+ * `path`
+
+
+ The values of the dictionary are treated as a filename match pattern
+ applied to that component of URLs, unless they start with a `^`, in which
+ case they are treated as a regular expression match. If all the specified
+ component matches for a given list item succeed, the URL is blacklisted.
+ items:
+ type: object
+ default: []
+ examples:
+ - - username: "*"
+ - netloc: google.com
+ - netloc: "*.google.com"
+ - scheme: http
+ - netloc: www.acme.com
+ path: /foo
+ - netloc: "^[0-9]+.[0-9]+.[0-9]+.[0-9]+$"
+ max_spider_size:
+ $ref: "#/$defs/bytes"
+ description: The largest allowed URL preview spidering size in bytes.
+ default: 10M
+ examples:
+ - 8M
+ url_preview_accept_language:
+ type: array
+ description: >-
+ A list of values for the Accept-Language HTTP header used when downloading
+ webpages during URL preview generation. This allows Synapse to specify the
+ preferred languages that URL previews should be in when communicating with
+ remote servers.
+
+
+ Each value is an IETF language tag; a 2-3 letter identifier for a language,
+ optionally followed by subtags separated by `-`, specifying a country or
+ region variant.
+
+
+ Multiple values can be provided, and a weight can be added to each by
+ using quality value syntax (;q=). `*` translates to any language.
+ items:
+ type: string
+ default:
+ - en
+ examples:
+ - - en-UK
+ - en-US;q=0.9
+ - fr;q=0.8
+ - "*;q=0.7"
+ oembed:
+ type: object
+ description: >-
+ oEmbed allows for easier embedding of content from a website. It can be
+ used for generating URL previews of services which support it. A default
+ list of oEmbed providers is included with Synapse.
+ properties:
+ disable_default_providers:
+ type: boolean
+ description: Do not use Synapse's default list of oEmbed providers.
+ default: false
+ additional_providers:
+ type: array
+ description: >-
+ Additional files with oEmbed configuration (each should be in the form
+ of providers.json).
+ items:
+ type: string
+ default: []
+ examples:
+ - disable_default_providers: true
+ additional_providers:
+ - oembed/my_providers.json
+ recaptcha_public_key:
+ type: ["string", "null"]
+ description: >-
+ This homeserver's ReCAPTCHA public key. Must be specified if
+ [`enable_registration_captcha`](#enable_registration_captcha) is enabled.
+ default: null
+ examples:
+ - YOUR_PUBLIC_KEY
+ recaptcha_private_key:
+ type: ["string", "null"]
+ description: >-
+ This homeserver's ReCAPTCHA private key. Must be specified if
+ [`enable_registration_captcha`](#enable_registration_captcha) is enabled.
+ default: null
+ examples:
+ - YOUR_PRIVATE_KEY
+ enable_registration_captcha:
+ type: boolean
+ description: >-
+ Set to `true` to require users to complete a CAPTCHA test when registering
+ an account. Requires a valid ReCaptcha public/private key.
+
+
+ Note that [`enable_registration`](#enable_registration) must also be set
+ to allow account registration.
+ default: false
+ examples:
+ - true
+ recaptcha_siteverify_api:
+ type: string
+ description: The API endpoint to use for verifying `m.login.recaptcha` responses.
+ default: "https://www.recaptcha.net/recaptcha/api/siteverify"
+ examples:
+ - "https://my.recaptcha.site"
+ turn_uris:
+ type: array
+ description: The public URIs of the TURN server to give to clients.
+ items:
+ type: string
+ default: []
+ examples:
+ - - "turn:example.org"
+ turn_shared_secret:
+ type: ["string", "null"]
+ description: The shared secret used to compute passwords for the TURN server.
+ default: null
+ examples:
+ - YOUR_SHARED_SECRET
+ turn_shared_secret_path:
+ type: ["string", "null"]
+ description: >-
+ An alternative to [`turn_shared_secret`](#turn_shared_secret): allows the
+ shared secret to be specified in an external file.
+
+
+ The file should be a plain text file, containing only the shared secret.
+      Synapse reads the shared secret from the given file once at startup.
+
+
+      _Added in Synapse 1.116.0._
+    default: null
+    examples:
+      - /path/to/secrets/file
+  turn_username:
+    type: ["string", "null"]
+    description: TURN server username if not using a token.
+    default: null
+    examples:
+      - TURNSERVER_USERNAME
+  turn_password:
+    type: ["string", "null"]
+    description: TURN server password if not using a token.
+    default: null
+    examples:
+      - TURNSERVER_PASSWORD
+  turn_user_lifetime:
+    $ref: "#/$defs/duration"
+    description: How long generated TURN credentials last.
+    default: 1h
+    examples:
+      - 2h
+  turn_allow_guests:
+    type: boolean
+    description: >-
+      Whether guests should be allowed to use the TURN server. If false, VoIP
+      will be unreliable for guests. However, it does introduce a slight
+      security risk as it allows users to connect to arbitrary endpoints without
+      having first signed up for a valid account (e.g. by passing a CAPTCHA).
+    default: true
+    examples:
+      - false
+  enable_registration:
+    type: boolean
+    description: >-
+      Enable registration for new users.
+
+
+      It is highly recommended that if you enable registration, you set one or
+      more of the following options, to avoid abuse of your server by "bots":
+
+
+      * [`enable_registration_captcha`](#enable_registration_captcha)
+
+      * [`registrations_require_3pid`](#registrations_require_3pid)
+
+      * [`registration_requires_token`](#registration_requires_token)
+
+
+      (In order to enable registration without any verification, you must also
+      set
+      [`enable_registration_without_verification`](#enable_registration_without_verification).)
+
+
+      Note that even if this setting is disabled, new accounts can still be
+      created via the admin API if
+      [`registration_shared_secret`](#registration_shared_secret) is set.
+    default: false
+    examples:
+      - true
+  enable_registration_without_verification:
+    type: boolean
+    description: >-
+      Enable registration without email or captcha verification. Note: this
+      option is *not* recommended, as registration without verification is a
+      known vector for spam and abuse. Has no effect unless
+      [`enable_registration`](#enable_registration) is also enabled.
+    default: false
+    examples:
+      - true
+  registration_requires_token:
+    type: boolean
+    description: >-
+      Require users to submit a token during registration. Tokens can be managed
+      using the admin [API](../administration/admin_api/registration_tokens.md).
+      Disabling this option will not delete any tokens previously generated.
+
+
+      Note that [`enable_registration`](#enable_registration) must also be set
+      to allow account registration.
+    default: false
+    examples:
+      - true
+  registration_shared_secret:
+    type: ["string", "null"]
+    description: >-
+      If set, allows registration of standard or admin accounts by anyone who
+      has the shared secret, even if
+      [`enable_registration`](#enable_registration) is not set.
+
+
+      This is primarily intended for use with the `register_new_matrix_user`
+      script (see [Registering a
+      user](../../setup/installation.md#registering-a-user)); however, the
+      interface is [documented](../../admin_api/register_api.html).
+
+
+      Replacing an existing `registration_shared_secret` with a new one requires
+      users of the [Shared-Secret Registration
+      API](../../admin_api/register_api.html) to start using the new secret for
+      requesting any further one-time nonces.
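+
+
+      For example, with the shared secret configured, an admin account could be
+      registered from the command line with something like the following (a
+      sketch; adjust the config path and server URL to your deployment):
+
+      ```
+      register_new_matrix_user -u alice -p 'a-strong-password' -a \
+          -c /path/to/homeserver.yaml http://localhost:8008
+      ```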
+
+
+      > ⚠️ **Warning** – The consequences of replacing
+      [`macaroon_secret_key`](#macaroon_secret_key) also apply here if
+      `macaroon_secret_key` is unset, since it then falls back to
+      `registration_shared_secret`.
+
+
+      See also
+      [`registration_shared_secret_path`](#registration_shared_secret_path).
+    default: null
+    examples:
+      - "<PRIVATE STRING>"
+  registration_shared_secret_path:
+    type: ["string", "null"]
+    description: >-
+      An alternative to
+      [`registration_shared_secret`](#registration_shared_secret): allows the
+      shared secret to be specified in an external file.
+
+
+      The file should be a plain text file, containing only the shared secret.
+
+
+      If this file does not exist, Synapse will create a new shared secret on
+      startup and store it in this file.
+
+
+      _Added in Synapse 1.67.0._
+    default: null
+    examples:
+      - /path/to/secrets/file
+  bcrypt_rounds:
+    type: integer
+    description: >-
+      Set the number of bcrypt rounds used to generate password hashes. Larger
+      numbers increase the work factor needed to generate the hash. The default
+      number is 12 (which equates to 2^12 rounds). N.B. increasing this will
+      exponentially increase the time required to register or log in - e.g. 24
+      means 2^24 rounds, which can take over 20 minutes.
+    default: 12
+    examples:
+      - 14
+  allow_guest_access:
+    type: boolean
+    description: >-
+      Allows users to register as guests without a password/email/etc, and
+      participate in rooms hosted on this server which have been made accessible
+      to anonymous users.
+    default: false
+    examples:
+      - true
+  enable_set_displayname:
+    type: boolean
+    description: >-
+      Whether users are allowed to change their displayname after it has been
+      initially set. Useful when provisioning users based on the contents of a
+      third-party directory.
+
+
+      Does not apply to server administrators.
+    default: true
+    examples:
+      - false
+  enable_set_avatar_url:
+    type: boolean
+    description: >-
+      Whether users are allowed to change their avatar after it has been
+      initially set. Useful when provisioning users based on the contents of a
+      third-party directory.
+
+
+      Does not apply to server administrators.
+    default: true
+    examples:
+      - false
+  auto_join_rooms:
+    type: array
+    description: >-
+      Users who register on this homeserver will automatically be joined to the
+      rooms listed under this option.
+
+
+      By default, any room aliases included in this list will be created as a
+      publicly joinable room when the first user registers for the homeserver.
+      If the room already exists, make certain it is a publicly joinable room,
+      i.e. the join rule of the room must be set to `public`. You can find more
+      options relating to auto-joining rooms below.
+
+
+      As Spaces are just rooms under the hood, Space aliases may also be used.
+    items:
+      type: string
+    default: []
+    examples:
+      - - "#exampleroom:example.com"
+        - "#anotherexampleroom:example.com"
+  autocreate_auto_join_rooms:
+    type: boolean
+    description: >-
+      Where `auto_join_rooms` are specified, setting this flag ensures that the
+      rooms exist by creating them when the first user on the homeserver
+      registers. This option will not create Spaces.
+
+
+      By default the auto-created rooms are publicly joinable from any federated
+      server. Use the `autocreate_auto_join_rooms_federated` and
+      `autocreate_auto_join_room_preset` settings to customise this behaviour.
+
+
+      Setting to false means that if the rooms are not manually created, users
+      cannot be auto-joined since they do not exist.
+    default: true
+    examples:
+      - false
+  autocreate_auto_join_rooms_federated:
+    type: boolean
+    description: >-
+      Whether the rooms listed in `auto_join_rooms` that are auto-created are
+      available via federation. Only has an effect if
+      `autocreate_auto_join_rooms` is true.
+
+
+      Note that whether a room is federated cannot be modified after creation.
+
+
+      If true, the room will be joinable from other servers. If false, users
+      from other homeservers are prevented from joining these rooms.
+    default: true
+    examples:
+      - false
+  autocreate_auto_join_room_preset:
+    type: string
+    description: >-
+      The room preset to use when auto-creating one of `auto_join_rooms`. Only
+      has an effect if `autocreate_auto_join_rooms` is true.
+
+
+      Possible values for this option are:
+
+      * "public_chat": the room is joinable by anyone, including federated
+      servers if `autocreate_auto_join_rooms_federated` is true (the default).
+
+      * "private_chat": an invitation is required to join these rooms.
+
+      * "trusted_private_chat": an invitation is required to join this room and
+      the invitee is assigned a power level of 100 upon joining the room.
+
+
+      Each preset will set up a room in the same manner as if it were provided
+      as the `preset` parameter when calling the [`POST
+      /_matrix/client/v3/createRoom`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom)
+      Client-Server API endpoint.
+
+
+      If a value of "private_chat" or "trusted_private_chat" is used then
+      `auto_join_mxid_localpart` must also be configured.
+    enum:
+      - public_chat
+      - private_chat
+      - trusted_private_chat
+    default: public_chat
+    examples:
+      - private_chat
+  auto_join_mxid_localpart:
+    type: ["string", "null"]
+    description: >-
+      The local part of the user id which is used to create `auto_join_rooms` if
+      `autocreate_auto_join_rooms` is true. If this is not provided then the
+      initial user account that registers will be used to create the rooms.
+
+
+      The user id is also used to invite new users to any auto-join rooms which
+      are set to invite-only.
+
+
+      It *must* be configured if `autocreate_auto_join_room_preset` is set to
+      "private_chat" or "trusted_private_chat".
+
+
+      Note that this must be specified in order for new users to be correctly
+      invited to any auto-join rooms which have been set to invite-only (either
+      at the time of creation or subsequently).
+
+
+      Note that, if the room already exists, this user must be joined and have
+      the appropriate permissions to invite new members.
+    default: null
+    examples:
+      - system
+  auto_join_rooms_for_guests:
+    type: boolean
+    description: >-
+      When `auto_join_rooms` is specified, setting this flag to false prevents
+      guest accounts from being automatically joined to the rooms.
+    default: true
+    examples:
+      - false
+  inhibit_user_in_use_error:
+    type: boolean
+    description: >-
+      Whether to inhibit errors raised when registering a new account if the
+      user ID already exists. If turned on, requests to `/register/available`
+      will always show a user ID as available, and Synapse won't raise an error
+      when starting a registration with a user ID that already exists. However,
+      Synapse will still raise an error if the registration completes and the
+      username conflicts.
+    default: false
+    examples:
+      - true
+  allow_underscore_prefixed_registration:
+    type: boolean
+    description: >-
+      Whether users are allowed to register with an underscore-prefixed
+      localpart. By default, AppServices use prefixes like `_example` to
+      namespace their associated ghost users.
+      If turned on, this may result in clashes or confusion. Useful when
+      provisioning users from an external identity provider.
+    default: false
+    examples:
+      - true
+  session_lifetime:
+    $ref: "#/$defs/duration"
+    description: >-
+      Time that a user's session remains valid for, after they log in.
+
+
+      Note that this is not currently compatible with guest logins.
+
+
+      Note also that this is calculated at login time: changes are not applied
+      retrospectively to users who have already logged in.
+    default: infinity
+    examples:
+      - 24h
+  refreshable_access_token_lifetime:
+    $ref: "#/$defs/duration"
+    description: >-
+      Time that an access token remains valid for, if the session is using
+      refresh tokens.
+
+
+      For more information about refresh tokens, please see the
+      [manual](user_authentication/refresh_tokens.md).
+
+
+      Note that this only applies to clients which advertise support for refresh
+      tokens.
+
+
+      Note also that this is calculated at login time and refresh time: changes
+      are not applied to existing sessions until they are refreshed.
+    default: 5m
+    examples:
+      - 10m
+  refresh_token_lifetime:
+    $ref: "#/$defs/duration"
+    description: >-
+      Time that a refresh token remains valid for (provided that it is not
+      exchanged for another one first). This option can be used to automatically
+      log out inactive sessions. Please see the manual for more information.
+
+
+      Note also that this is calculated at login time and refresh time: changes
+      are not applied to existing sessions until they are refreshed.
+    default: infinity
+    examples:
+      - 24h
+  nonrefreshable_access_token_lifetime:
+    $ref: "#/$defs/duration"
+    description: >-
+      Time that an access token remains valid for, if the session is NOT using
+      refresh tokens.
+
+
+      Please note that not all clients support refresh tokens, so setting this
+      to a short value may be inconvenient for some users who will then be
+      logged out frequently.
+
+
+      Note also that this is calculated at login time: changes are not applied
+      retrospectively to existing sessions for users that have already logged
+      in.
+    default: infinity
+    examples:
+      - 24h
+  ui_auth:
+    oneOf:
+      - $ref: "#/$defs/duration"
+      - type: object
+        properties:
+          session_timeout:
+            $ref: "#/$defs/duration"
+    description: >-
+      The amount of time to allow a user-interactive authentication session to
+      be active.
+
+
+      This defaults to 0, meaning the user is queried for their credentials
+      before every action, but this can be overridden to allow a single
+      validation to be re-used. This weakens the protections afforded by the
+      user-interactive authentication process, by allowing for multiple (and
+      potentially different) operations to use the same validation session.
+
+
+      This is ignored for potentially "dangerous" operations (including
+      deactivating an account, modifying an account password, adding a 3PID, and
+      minting additional login tokens).
+
+
+      Use the `session_timeout` sub-option here to change the time allowed for
+      credential validation.
+    default: 0
+    examples:
+      - session_timeout: 15s
+  login_via_existing_session:
+    type: object
+    description: >-
+      Matrix supports the ability of an existing session to mint a login token
+      for another client.
+
+
+      Synapse disables this by default as it has security ramifications – a
+      malicious client could use the mechanism to spawn more than one session.
+    properties:
+      enabled:
+        type: boolean
+        description: "Enable login via existing session."
+        default: false
+      require_ui_auth:
+        type: boolean
+        description: Require user-interactive authentication.
+        default: true
+      token_timeout:
+        $ref: "#/$defs/duration"
+        description: Duration of time the generated token is valid.
+        default: 5m
+    examples:
+      - enabled: true
+        require_ui_auth: false
+        token_timeout: 5m
+  enable_metrics:
+    type: boolean
+    description: Set to true to enable collection and rendering of performance metrics.
+    default: false
+    examples:
+      - true
+  sentry:
+    type: object
+    description: >-
+      Use this option to enable Sentry integration. Provide the DSN assigned to
+      you by Sentry with the `dsn` setting.
+
+
+      An optional `environment` field can be used to specify an environment.
+      This allows for log maintenance based on different environments, ensuring
+      better organization and analysis.
+
+
+      NOTE: While attempts are made to ensure that the logs don't contain any
+      sensitive information, this cannot be guaranteed. By enabling this option
+      the Sentry server may therefore receive sensitive information, and it in
+      turn may then disseminate sensitive information through insecure
+      notification channels if so configured.
+    properties:
+      dsn:
+        type: ["string", "null"]
+        description: >-
+          The DSN assigned by Sentry. If unset or null, Sentry integration is
+          disabled.
+        default: null
+      environment:
+        type: ["string", "null"]
+        description: Sentry environment.
+        default: null
+    examples:
+      - environment: production
+        dsn: ...
+  metrics_flags:
+    type: object
+    description: >-
+      Flags to enable Prometheus metrics which are not suitable to be enabled by
+      default, either for performance reasons or limited use. Currently the only
+      option is `known_servers`.
+    properties:
+      known_servers:
+        type: boolean
+        description: >-
+          Publishes `synapse_federation_known_servers`, a gauge of the number of
+          servers this homeserver knows about, including itself. May cause
+          performance problems on large homeservers.
+        default: false
+    examples:
+      - known_servers: true
+  report_stats:
+    type: boolean
+    description: >-
+      Whether or not to report homeserver usage statistics. This is originally
+      set when generating the config. Set this option to true or false to change
+      the current behavior. See [Reporting Homeserver Usage
+      Statistics](../administration/monitoring/reporting_homeserver_usage_statistics.md)
+      for information on what data is reported.
+
+
+      Statistics will be reported 5 minutes after Synapse starts, and then every
+      3 hours after that.
+    default: false
+    examples:
+      - true
+  report_stats_endpoint:
+    type: string
+    description: The endpoint to report homeserver usage statistics to.
+    default: https://matrix.org/report-usage-stats/push
+    examples:
+      - https://example.com/report-usage-stats/push
+  room_prejoin_state:
+    type: object
+    description: >-
+      This setting controls the state that is shared with users upon receiving
+      an invite to a room, or in reply to a knock on a room. By default, the
+      following state events are shared with users:
+
+
+      - `m.room.join_rules`
+
+      - `m.room.canonical_alias`
+
+      - `m.room.avatar`
+
+      - `m.room.encryption`
+
+      - `m.room.name`
+
+      - `m.room.create`
+
+      - `m.room.topic`
+
+
+      *Changed in Synapse 1.74:* admins can filter the events in prejoin state
+      based on their state key.
+    properties:
+      disable_default_event_types:
+        type: boolean
+        description: >-
+          Set to `true` to disable the above defaults. If this is enabled, only
+          the event types listed in `additional_event_types` are shared.
+        default: false
+      additional_event_types:
+        type: array
+        description: >-
+          A list of additional state events to include in the events to be
+          shared. By default, this list is empty (so only the default event
+          types are shared).
+
+
+          Each entry in this list should be either a single string or a list of
+          two strings.
+
+          * A standalone string `t` represents all events with type `t` (i.e.
+          with no restrictions on state keys).
+
+          * A pair of strings `[t, s]` represents a single event with type `t`
+          and state key `s`. The same type can appear in two entries with
+          different state keys: in this situation, both state keys are included
+          in prejoin state.
+        items:
+          type: ["string", "array"]
+          items:
+            type: string
+        default: []
+    examples:
+      - disable_default_event_types: false
+        additional_event_types:
+          - org.example.custom.event.typeA
+          - - org.example.custom.event.typeB
+            - foo
+          - - org.example.custom.event.typeC
+            - bar
+          - - org.example.custom.event.typeC
+            - baz
+  track_puppeted_user_ips:
+    type: boolean
+    description: >-
+      We record the IP address of clients used to access the API for various
+      reasons, including displaying it to the user in the "Where you're signed
+      in" dialog.
+
+
+      By default, when puppeting another user via the admin API, the client IP
+      address is recorded against the user who created the access token (i.e.
+      the admin user), and *not* the puppeted user.
+
+
+      Set this option to true to also record the IP address against the puppeted
+      user. (This also means that the puppeted user will count as an "active"
+      user for the purpose of monthly active user tracking – see
+      `limit_usage_by_mau` etc above.)
+    default: false
+    examples:
+      - true
+  app_service_config_files:
+    type: array
+    description: A list of application service config files to use.
+    items:
+      type: string
+    default: []
+    examples:
+      - - app_service_1.yaml
+        - app_service_2.yaml
+  track_appservice_user_ips:
+    type: boolean
+    description: >-
+      Set to true to enable tracking of application service IP addresses.
+      Implicitly enables MAU tracking for application service users.
+    default: false
+    examples:
+      - true
+  use_appservice_legacy_authorization:
+    type: boolean
+    description: >-
+      Whether to send the application service access tokens via the
+      `access_token` query parameter per older versions of the Matrix
+      specification. Defaults to false. Set to true to enable sending access
+      tokens via a query parameter.
+
+
+      **Enabling this option is considered insecure and is not recommended.**
+    default: false
+    examples:
+      - true
+  macaroon_secret_key:
+    type: ["string", "null"]
+    description: >-
+      A secret which is used to sign
+
+      - access tokens for guest users,
+
+      - short-term login tokens used during SSO logins (OIDC) and
+
+      - tokens used for unsubscribing from email notifications.
+
+
+      If none is specified, the `registration_shared_secret` is used, if one is
+      given; otherwise, a secret key is derived from the signing key.
+
+
+      > ⚠️ **Warning** – Replacing an existing `macaroon_secret_key` with a new
+      one will lead to invalidation of access tokens for all guest users. It
+      will also break unsubscribe links in emails sent before the change. An
+      unlucky user might encounter a broken SSO login flow and would have to
+      start again.
+    default: null
+    examples:
+      - "<PRIVATE STRING>"
+  macaroon_secret_key_path:
+    type: ["string", "null"]
+    description: >-
+      An alternative to [`macaroon_secret_key`](#macaroon_secret_key): allows
+      the secret key to be specified in an external file.
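+
+
+      For instance, one way to create such a file is with OpenSSL (a sketch;
+      any sufficiently long random string will do):
+
+      ```
+      openssl rand -hex 32 > /path/to/secrets/file
+      chmod 600 /path/to/secrets/file
+      ```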
+
+
+      The file should be a plain text file, containing only the secret key.
+      Synapse reads the secret key from the given file once at startup.
+
+
+      _Added in Synapse 1.121.0._
+    default: null
+    examples:
+      - /path/to/secrets/file
+  form_secret:
+    type: ["string", "null"]
+    description: >-
+      A secret which is used to calculate HMACs for form values, to stop
+      falsification of values. Must be specified for the User Consent forms to
+      work.
+
+
+      Replacing an existing `form_secret` with a new one might break the user
+      consent page for an unlucky user and require them to reopen the page from
+      a new link.
+    default: null
+    examples:
+      - "<PRIVATE STRING>"
+  form_secret_path:
+    type: ["string", "null"]
+    description: >-
+      An alternative to [`form_secret`](#form_secret): allows the secret to be
+      specified in an external file.
+
+
+      The file should be a plain text file, containing only the secret. Synapse
+      reads the secret from the given file once at startup.
+
+
+      _Added in Synapse 1.126.0._
+    default: null
+    examples:
+      - /path/to/secrets/file
+  signing_key_path:
+    type: ["string", "null"]
+    description: >-
+      Path to the signing key to sign events and federation requests with.
+
+
+      *New in Synapse 1.67*: If this file does not exist, Synapse will create a
+      new signing key on startup and store it in this file.
+    default: null
+    examples:
+      - CONFDIR/SERVERNAME.signing.key
+  old_signing_keys:
+    type: object
+    description: >-
+      The keys that the server used to sign messages with but won't use to sign
+      new messages.
+
+
+      It is possible to build an entry from an old `signing.key` file using the
+      `export_signing_key` script which is provided with Synapse.
+
+
+      If you have lost the private key file, you can ask another server you
+      trust to tell you the public keys it has seen from your server. To fetch
+      the keys from `matrix.org`, try something like:
+
+
+      ```
+
+      curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com |
+      jq '.server_keys | map(.verify_keys) | add'
+      ```
+    additionalProperties:
+      type: object
+      properties:
+        key:
+          type: string
+          description: The base64-encoded public key.
+        expired_ts:
+          type: integer
+          description: >-
+            Time, in milliseconds since the unix epoch, that the key was last used.
+    default: {}
+    examples:
+      - "ed25519:id":
+          key: base64string
+          expired_ts: 123456789123
+  key_refresh_interval:
+    $ref: "#/$defs/duration"
+    description: >-
+      How long a key response published by this server is valid for. Used to set
+      the `valid_until_ts` in `/key/v2` APIs. Determines how quickly servers
+      will query to check which keys are still valid.
+    default: 1d
+    examples:
+      - 2d
+  trusted_key_servers:
+    type: array
+    description: >-
+      The trusted servers to download signing keys from.
+
+
+      When we need to fetch a signing key, each server is tried in parallel.
+
+
+      Normally, the connection to the key server is validated via TLS
+      certificates. Additional security can be provided by configuring a `verify
+      key`, which will make Synapse check that the response is signed by that
+      key.
+
+
+      This setting supersedes an older setting named `perspectives`. The old
+      format is still supported for backwards-compatibility, but it is
+      deprecated.
+
+
+      `trusted_key_servers` defaults to matrix.org, but using it will generate a
+      warning on start-up. To suppress this warning, set
+      `suppress_key_server_warning` to true.
+
+
+      If the use of a trusted key server has to be deactivated, e.g.
in a + private federation or for privacy reasons, this can be realised by setting + an empty array (`trusted_key_servers: []`). Then Synapse will request the + keys directly from the server that owns the keys. If Synapse does not get + keys directly from the server, the events of this server will be rejected. + items: + server_name: + type: string + description: The name of the server. Required. + verify_keys: + type: ["object", "null"] + description: >- + An optional map from key id to base64-encoded public key. If + specified, we will check that the response is signed by at least one + of the given keys. + additionalProperties: + type: string + accept_keys_insecurely: + type: boolean + description: >- + Normally, if `verify_keys` is unset, and + `federation_verify_certificates` is not `true`, synapse will refuse to + start, because this would allow anyone who can spoof DNS responses to + masquerade as the trusted key server. If you know what you are doing + and are sure that your network environment provides a secure + connection to the key server, you can set this to `true` to override + this behaviour. + default: + - server_name: matrix.org + examples: + - - server_name: my_trusted_server.example.com + verify_keys: + "ed25519:auto": abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr + - server_name: my_other_trusted_server.example.com + - - server_name: matrix.org + suppress_key_server_warning: + type: boolean + description: >- + Set the following to true to disable the warning that is emitted when the + `trusted_key_servers` include "matrix.org". See above. + default: false + examples: + - true + key_server_signing_keys_path: + type: ["string", "null"] + description: >- + The signing keys to use when acting as a trusted key server. If not + specified defaults to the server signing key. + + + Can contain multiple keys, one per line. + default: null + examples: + - key_server_signing_keys.key + oidc_providers: + type: array + description: >- + List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for + registration and login. See [here](../../openid.md) for information on how + to configure these options. + + + For backwards compatibility, it is also possible to configure a single + OIDC provider via an `oidc_config` setting. This is now deprecated and + admins are advised to migrate to the `oidc_providers` format. (When doing + that migration, use `oidc` for the `idp_id` to ensure that existing users + continue to be recognised.) + + + It is possible to configure Synapse to only allow logins if certain + attributes match particular values in the OIDC userinfo. The requirements + can be listed under `attribute_requirements` as shown here: + + ```yaml + + attribute_requirements: + - attribute: family_name + one_of: ["Stephensson", "Smith"] + - attribute: groups + value: "admin" + # If `value` or `one_of` are not specified, the attribute only needs + # to exist, regardless of value. + - attribute: picture + ``` + + + `attribute` is a required field, while `value` and `one_of` are optional. + + + All of the listed attributes must match for the login to be permitted. + Additional attributes can be added to userinfo by expanding the `scopes` + section of the OIDC config to retrieve additional information from the + OIDC provider. + + + If the OIDC claim is a list, then the attribute must match any value in + the list. Otherwise, it must exactly match the value of the claim. 
+      Using the example above, the `family_name` claim MUST be either
+      "Stephensson" or "Smith", while the `groups` claim MUST contain "admin".
+    items:
+      type: object
+      properties:
+        idp_id:
+          type: string
+          description: >-
+            A unique identifier for this identity provider. Used internally by
+            Synapse; should be a single word such as "github". Note that, if
+            this is changed, users authenticating via that provider will no
+            longer be recognised as the same user! (Use "oidc" here if you are
+            migrating from an old `oidc_config` configuration.)
+        idp_name:
+          type: string
+          description: >-
+            A user-facing name for this identity provider, which is used to
+            offer the user a choice of login mechanisms.
+        idp_icon:
+          type: string
+          description: >-
+            An optional icon for this identity provider, which is presented by
+            clients and Synapse's own IdP picker page. If given, must be an MXC
+            URI of the format `mxc://<server-name>/<media-id>`. (An easy way to
+            obtain such an MXC URI is to upload an image to an (unencrypted)
+            room and then copy the URL from the source of the event.)
+        idp_brand:
+          type: string
+          description: >-
+            An optional brand for this identity provider, allowing clients to
+            style the login flow according to the identity provider in question.
+            See the [spec](https://spec.matrix.org/latest/) for possible options
+            here.
+        discover:
+          type: boolean
+          description: >-
+            Set to false to disable the use of the OIDC discovery mechanism to
+            discover endpoints. Defaults to true.
+        issuer:
+          type: string
+          description: >-
+            Required. The OIDC issuer. Used to validate tokens and (if discovery
+            is enabled) to discover the provider's endpoints.
+        client_id:
+          type: string
+          description: Required. OAuth2 client id to use.
+        client_secret:
+          type: ["string", "null"]
+          description: >-
+            OAuth2 client secret to use. May be omitted if
+            `client_secret_jwt_key` is given, or if `client_auth_method` is
+            `none`. Must be omitted if `client_secret_path` is specified.
+        client_secret_path:
+          type: ["string", "null"]
+          description: >-
+            Path to the OAuth2 client secret to use. This avoids the need to
+            include secrets in the config file itself. Mutually exclusive with
+            `client_secret`. Can be omitted if `client_secret_jwt_key` is
+            specified.
+
+
+            *Added in Synapse 1.91.0.*
+        client_secret_jwt_key:
+          type: ["object", "null"]
+          description: >-
+            Alternative to client_secret: details of a key used to create a JSON
+            Web Token to be used as an OAuth2 client secret.
+          properties:
+            key:
+              type: ["string", "null"]
+              description: >-
+                A pem-encoded signing key. Must be a suitable key for the
+                algorithm specified. Required unless `key_file` is given.
+            key_file:
+              type: ["string", "null"]
+              description: >-
+                Path to the file containing a pem-encoded signing key. Required
+                unless `key` is given.
+            jwt_header:
+              type: object
+              description: >-
+                Dictionary giving properties to include in the JWT header. Must
+                include the key `alg`.
+              properties:
+                alg:
+                  type: string
+                  description: >-
+                    Algorithm used to sign the JWT, such as ES256, using the JWA
+                    identifiers in RFC7518.
+            jwt_payload:
+              type: object
+              description: >-
+                Optional dictionary giving properties to include in the JWT
+                payload. Normally this should include an `iss` key.
+        client_auth_method:
+          type: ["string", "null"]
+          enum:
+            - client_secret_basic
+            - client_secret_post
+            - none
+            - null
+          description: >-
+            Auth method to use when exchanging the token. Valid values are
+            `client_secret_basic` (default), `client_secret_post` and `none`.
+        pkce_method:
+          type: ["string", "null"]
+          enum:
+            - auto
+            - always
+            - never
+            - null
+          description: >-
+            Whether to use proof key for code exchange when requesting and
+            exchanging the token. Valid values are: `auto`, `always`, or
+            `never`. Defaults to `auto`, which uses PKCE if supported during
+            metadata discovery. Set to `always` to force enable PKCE or `never`
+            to force disable PKCE.
+        id_token_signing_alg_values_supported:
+          type: array
+          description: >-
+            List of the JWS signing algorithms (`alg` values) that are supported
+            for signing the `id_token`.
+
+
+            This is *not* required if `discovery` is disabled. We default to
+            supporting `RS256` in the downstream usage if no algorithms are
+            configured here or in the discovery document.
+
+
+            According to the spec, the algorithm `"RS256"` MUST be included. A
+            strictly rigid approach would be to reject this provider as
+            non-compliant if it is not included, but we instead allow whatever
+            is configured and see what happens (you are the one who configured
+            the value and is cooperating with the identity provider).
+
+
+            The `alg` value `"none"` MAY be supported, but can only be used if
+            the Authorization Endpoint does not include `id_token` in the
+            `response_type` (e.g. `/authorize?response_type=code`, as in the
+            Authorization Code Flow, where `none` can apply;
+            `/authorize?response_type=code%20id_token` is a case where `none`
+            can't apply).
+          items:
+            type: string
+        scopes:
+          type: ["array", "null"]
+          description: >-
+            List of scopes to request. This should normally include the "openid"
+            scope. Defaults to `["openid"]`.
+          items:
+            type: string
+        authorization_endpoint:
+          type: string
+          description: >-
+            The OAuth2 authorization endpoint. Required if provider discovery is
+            disabled.
+        token_endpoint:
+          type: string
+          description: >-
+            The OAuth2 token endpoint. Required if provider discovery is disabled.
+        userinfo_endpoint:
+          type: string
+          description: >-
+            The OIDC userinfo endpoint. Required if discovery is disabled and
+            the "openid" scope is not requested.
+        jwks_uri:
+          type: string
+          description: >-
+            URI where to fetch the JWKS. Required if discovery is disabled and
+            the "openid" scope is used.
+        skip_verification:
+          type: boolean
+          description: >-
+            Set to `true` to skip metadata verification. Use this if you are
+            connecting to a provider that is not OpenID Connect compliant.
+            Defaults to false. Avoid this in production.
+        user_profile_method:
+          type: ["string", "null"]
+          enum:
+            - auto
+            - userinfo_endpoint
+            - null
+          description: >-
+            Whether to fetch the user profile from the userinfo endpoint, or to
+            rely on the data returned in the id_token from the `token_endpoint`.
+            Valid values are: `auto` or `userinfo_endpoint`. Defaults to `auto`,
+            which uses the userinfo endpoint if `openid` is not included in
+            `scopes`. Set to `userinfo_endpoint` to always use the userinfo
+            endpoint.
+        redirect_uri:
+          type: ["string", "null"]
+          description: >-
+            An optional string, that if set will override the `redirect_uri`
+            parameter sent in the requests to the authorization and token
+            endpoints. Useful if you want to redirect the client to another
+            endpoint as part of the OIDC login. Be aware that the client must
+            then call Synapse's OIDC callback URL
+            (`<public_baseurl>/_synapse/client/oidc/callback`) manually
+            afterwards. Must be a valid URL including scheme and path.
+        additional_authorization_parameters:
+          type: object
+          description: >-
+            A string-to-string dictionary that will be passed as additional
+            parameters to the authorization grant URL.
+          additionalProperties:
+            type: string
+        passthrough_authorization_parameters:
+          type: array
+          description: >-
+            List of parameters that will be passed through from the redirect
+            endpoint to the authorization grant URL.
+          items:
+            type: string
+        allow_existing_users:
+          type: boolean
+          description: >-
+            Set to true to allow a user logging in via OIDC to match a
+            pre-existing account instead of failing. This could be used if
+            switching from password logins to OIDC. Defaults to false.
+        enable_registration:
+          type: boolean
+          description: >-
+            Set to `false` to disable automatic registration of new users. This
+            allows the OIDC SSO flow to be limited to sign in only, rather than
+            automatically registering users that have a valid SSO login but do
+            not have a pre-registered account. Defaults to true.
+        user_mapping_provider:
+          type: object
+          description: >-
+            Configuration for how attributes returned from an OIDC provider are
+            mapped onto a Matrix user.
+
+
+            When rendering, the Jinja2 templates are given a `user` variable,
+            which is set to the claims returned by the UserInfo Endpoint and/or
+            in the ID Token.
+          properties:
+            module:
+              type: string
+              description: >-
+                The class name of a custom mapping module. Default is
+                `synapse.handlers.oidc.JinjaOidcMappingProvider`. See [OpenID
+                Mapping
+                Providers](../../sso_mapping_providers.md#openid-mapping-providers)
+                for information on implementing a custom mapping provider.
+            config:
+              type: object
+              description: >-
+                Configuration for the mapping provider module. This section will
+                be passed as a Python dictionary to the user mapping provider
+                module's `parse_config` method.
+
+
+                For the default provider, the following settings are available:
+
+
+                * `subject_template`: Jinja2 template for a unique identifier
+                for the user. Defaults to `{{ user.sub }}`, which OpenID Connect
+                compliant providers should provide.
+
+                  This replaces and overrides `subject_claim`.
+
+                * `subject_claim`: name of the claim containing a unique
+                identifier for the user. Defaults to `sub`, which OpenID Connect
+                compliant providers should provide.
+
+                  *Deprecated in Synapse v1.75.0.*
+
+                * `picture_template`: Jinja2 template for a URL for the user's
+                profile picture. Defaults to `{{ user.picture }}`, which OpenID
+                Connect compliant providers should provide. It must refer to a
+                direct image file such as a PNG, JPEG, or GIF file.
+
+                  This replaces and overrides `picture_claim`.
+
+                  Currently only supported in monolithic (single-process) server
+                  configurations where the media repository runs within the
+                  Synapse process.
+
+                * `picture_claim`: name of the claim containing a URL for the
+                user's profile picture. Defaults to "picture", which OpenID
+                Connect compliant providers should provide. It must refer to a
+                direct image file such as a PNG, JPEG, or GIF file.
+
+                  Currently only supported in monolithic (single-process) server
+                  configurations where the media repository runs within the
+                  Synapse process.
+
+                  *Deprecated in Synapse v1.75.0.*
+
+                * `localpart_template`: Jinja2 template for the localpart of the
+                MXID. If this is not set, the user will be prompted to choose
+                their own username (see the documentation for the
+                `sso_auth_account_details.html` template). This template can use
+                the `localpart_from_email` filter.
+
+
+                * `confirm_localpart`: Whether to prompt the user to validate
+                (or change) the generated localpart (see the documentation for
+                the `sso_auth_account_details.html` template), instead of
+                registering the account right away.
+
+
+                * `display_name_template`: Jinja2 template for the display name
+                to set on first login. If unset, no displayname will be set.
+
+
+                * `email_template`: Jinja2 template for the email address of the
+                user. If unset, no email address will be added to the account.
+
+
+                * `extra_attributes`: a map of Jinja2 templates for extra
+                attributes to send back to the client during login. Note that
+                these are non-standard and clients will ignore them without
+                modifications.
+        backchannel_logout_enabled:
+          type: boolean
+          description: >-
+            Set to `true` to process OIDC Back-Channel Logout notifications.
+            Those notifications are expected to be received on
+            `/_synapse/client/oidc/backchannel_logout`. Defaults to `false`.
+        backchannel_logout_ignore_sub:
+          type: boolean
+          description: >-
+            By default, the OIDC Back-Channel Logout feature checks that the
+            `sub` claim matches the subject claim received during login. This
+            check can be disabled by setting this to `true`. Defaults to
+            `false`.
+
+
+            You might want to disable this if the `subject_claim` returned by
+            the mapping provider is not `sub`.
+    default: []
+    examples:
+      - - idp_id: my_idp
+          idp_name: My OpenID provider
+          idp_icon: "mxc://example.com/mediaid"
+          discover: false
+          issuer: "https://accounts.example.com/"
+          client_id: provided-by-your-issuer
+          client_secret: provided-by-your-issuer
+          client_auth_method: client_secret_post
+          scopes:
+            - openid
+            - profile
+          authorization_endpoint: "https://accounts.example.com/oauth2/auth"
+          token_endpoint: "https://accounts.example.com/oauth2/token"
+          userinfo_endpoint: "https://accounts.example.com/userinfo"
+          jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
+          additional_authorization_parameters:
+            acr_values: 2fa
+          passthrough_authorization_parameters:
+            - login_hint
+          skip_verification: true
+          enable_registration: true
+          user_mapping_provider:
+            config:
+              subject_claim: id
+              localpart_template: "{{ user.login }}"
+              display_name_template: "{{ user.name }}"
+              email_template: "{{ user.email }}"
+          attribute_requirements:
+            - attribute: userGroup
+              value: synapseUsers
+  sso:
+    type: object
+    description: >-
+      Additional settings to use with single-sign on systems such as OpenID
+      Connect.
+
+
+      Server admins can configure custom templates for pages related to SSO. See
+      [here](../../templates.md) for more information.
+    properties:
+      client_whitelist:
+        type: ["array", "null"]
+        description: >-
+          A list of client URLs which are whitelisted so that the user does not
+          have to confirm giving access to their account to the URL. Any client
+          whose URL starts with an entry in the following list will not be
+          subject to an additional confirmation step after the SSO login is
+          completed.
+
+
+          WARNING: An entry such as "https://my.client" is insecure, because it
+          will also match "https://my.client.evil.site", exposing your users to
+          phishing attacks from evil.site. To avoid this, include a slash after
+          the hostname: "https://my.client/".
+
+
+          The login fallback page (used by clients that don't natively support
+          the required login flows) is whitelisted in addition to any URLs in
+          this list. By default, this list contains only the login fallback
+          page.
+        items:
+          type: string
+        default: null
+      update_profile_information:
+        type: boolean
+        description: >-
+          Use this setting to keep a user's profile fields in sync with
+          information from the identity provider. Currently only syncing the
+          displayname is supported. Fields are checked on every SSO login, and
+          are updated if necessary. Note that enabling this option will override
+          user profile information, regardless of whether users have opted-out
+          of syncing that information when first signing in.
+        default: false
+    examples:
+      - client_whitelist:
+          - "https://riot.im/develop"
+          - "https://my.custom.client/"
+        update_profile_information: true
+  jwt_config:
+    type: object
+    description: >-
+      JSON web token integration. The following settings can be used to make
+      Synapse use JSON web tokens for authentication, instead of its internal
+      password database.
+
+
+      Each JSON Web Token needs to contain a "sub" (subject) claim, which is
+      used as the localpart of the mxid.
+
+
+      Additionally, the expiration time ("exp"), not before time ("nbf"), and
+      issued at ("iat") claims are validated if present.
+
+
+      Note that this is a non-standard login type and client support is expected
+      to be non-existent.
+
+
+      See [here](../../jwt.md) for more.
+    properties:
+      enabled:
+        type: boolean
+        description: Set to true to enable authorization using JSON web tokens.
+        default: false
+      secret:
+        type: string
+        description: >-
+          This is either the private shared secret or the public key used to
+          decode the contents of the JSON web token. Required if `enabled` is
+          set to true.
+      algorithm:
+        type: string
+        description: >-
+          The algorithm used to sign (or HMAC) the JSON web token. Supported
+          algorithms are listed [here (section
+          JWS)](https://docs.authlib.org/en/latest/specs/rfc7518.html). Required
+          if `enabled` is set to true.
+      subject_claim:
+        type: ["string", "null"]
+        description: Name of the claim containing a unique identifier for the user.
+        default: sub
+      display_name_claim:
+        type: ["string", "null"]
+        description: >-
+          Name of the claim containing the display name for the user. If
+          provided, the display name will be set to the value of this claim upon
+          first login.
+        default: null
+      issuer:
+        type: ["string", "null"]
+        description: >-
+          The issuer to validate the "iss" claim against. If provided the "iss"
+          claim will be required and validated for all JSON web tokens.
+        default: null
+      audiences:
+        type: ["array", "null"]
+        description: >-
+          A list of audiences to validate the "aud" claim against. If provided
+          the "aud" claim will be required and validated for all JSON web
+          tokens. Note that if the "aud" claim is included in a JSON web token
+          then validation will fail without configuring audiences.
+        items:
+          type: string
+        default: null
+    examples:
+      - enabled: true
+        secret: provided-by-your-issuer
+        algorithm: provided-by-your-issuer
+        subject_claim: name_of_claim
+        display_name_claim: name_of_claim
+        issuer: provided-by-your-issuer
+        audiences:
+          - provided-by-your-issuer
+  password_config:
+    type: object
+    description: Use this setting to enable password-based logins.
+    properties:
+      enabled:
+        type: ["boolean", "string"]
+        enum:
+          - true
+          - false
+          - only_for_reauth
+        description: >-
+          Set to false to disable password authentication. Set to
+          `only_for_reauth` to allow users with existing passwords to use them
+          to reauthenticate (not log in), whilst preventing new users from
+          setting passwords.
+        default: true
+      localdb_enabled:
+        type: boolean
+        description: >-
+          Set to false to disable authentication against the local password
+          database. This is ignored if `enabled` is false, and is only useful if
+          you have other `password_providers`.
+        default: true
+      pepper:
+        type: ["string", "null"]
+        description: >-
+          Set the value here to a secret random string for extra security. DO
+          NOT CHANGE THIS AFTER INITIAL SETUP!
+        default: null
+      policy:
+        type: object
+        description: >-
+          Define and enforce a password policy, such as minimum lengths for
+          passwords, etc. This is an implementation of MSC2000.
+        properties:
+          enabled:
+            type: boolean
+            description: Set to true to enable.
+            default: false
+          minimum_length:
+            type: integer
+            description: Minimum accepted length for a password.
+            default: 0
+          require_digit:
+            type: boolean
+            description: Whether a password must contain at least one digit.
+            default: false
+          require_symbol:
+            type: boolean
+            description: >-
+              Whether a password must contain at least one symbol. A symbol is
+              any character that's not a number or a letter.
+            default: false
+          require_lowercase:
+            type: boolean
+            description: Whether a password must contain at least one lowercase letter.
+            default: false
+          require_uppercase:
+            type: boolean
+            description: Whether a password must contain at least one uppercase letter.
+            default: false
+    examples:
+      - enabled: false
+        localdb_enabled: false
+        pepper: EVEN_MORE_SECRET
+        policy:
+          enabled: true
+          minimum_length: 15
+          require_digit: true
+          require_symbol: true
+          require_lowercase: true
+          require_uppercase: true
+  push:
+    type: object
+    description: This setting defines options for push notifications.
+    properties:
+      enabled:
+        type: boolean
+        description: >-
+          Enables or disables push notification calculation. Note that disabling
+          this will also stop unread counts from being calculated for rooms.
+          This mode of operation is intended for homeservers which may only have
+          bots or appservice users connected, or are otherwise not interested in
+          push/unread counters.
+        default: true
+      include_content:
+        type: boolean
+        description: >-
+          Clients requesting push notifications can either have the body of the
+          message sent in the notification poke along with other details like
+          the sender, or just the event ID and room ID (`event_id_only`). If
+          clients choose to have the body sent, this option controls whether the
+          notification request includes the content of the event (other details
+          like the sender are still included). If `event_id_only` is enabled, it
+          has no effect. For modern Android devices the notification content
+          will still appear because it is loaded by the app. iPhones, however,
+          will send a notification saying only that a message arrived and who it
+          came from. Set to false to only include the event ID and room ID in
+          push notification payloads.
+        default: true
+      group_unread_count_by_room:
+        type: boolean
+        description: >-
+          When a push notification is received, an unread count is also sent.
+          This number can either be calculated as the number of unread messages
+          for the user, or the number of *rooms* the user has unread messages
+          in. If true, push clients will see the number of rooms with unread
+          messages in them. Set to false to instead send the number of unread
+          messages.
+        default: true
+      jitter_delay:
+        $ref: "#/$defs/duration"
+        description: >-
+          Delays push notifications by a random amount up to the given duration.
+          Useful for mitigating timing attacks. Optional.
+
+
+          _Added in Synapse 1.84.0._
+        default: 0s
+    examples:
+      - enabled: true
+        include_content: false
+        group_unread_count_by_room: false
+        jitter_delay: 10s
+  encryption_enabled_by_default_for_room_type:
+    type: string
+    description: >-
+      Controls whether locally-created rooms should be end-to-end encrypted by
+      default.
+
+
+      Possible options are "all", "invite", and "off". They are defined as:
+
+
+      * "all": any locally-created room
+
+      * "invite": any room created with the `private_chat` or
+      `trusted_private_chat` room creation presets
+
+      * "off": rooms are not encrypted by default
+
+
+      Note that this option will only affect rooms created after it is set. It
+      will also not affect rooms created by other servers.
+    enum:
+      - all
+      - invite
+      - "off"
+    default: "off"
+    examples:
+      - invite
+  user_directory:
+    type: object
+    description: This setting defines options related to the user directory.
+    properties:
+      enabled:
+        type: boolean
+        description: >-
+          Defines whether users can search the user directory. If false then
+          empty responses are returned to all queries.
+        default: true
+      search_all_users:
+        type: boolean
+        description: >-
+          Defines whether to search all users visible to your homeserver at the
+          time the search is performed. If set to true, will return all users
+          known to the homeserver matching the search query. If false, search
+          results will only contain users visible in public rooms and users
+          sharing a room with the requester.
+
+
+          NB. If you set this to true, and the last time the user_directory
+          search indexes were (re)built was before Synapse 1.44, you'll have to
+          rebuild the indexes in order to search through all known users.
+
+
+          These indexes are built the first time Synapse starts; admins can
+          manually trigger a rebuild via the API following the instructions [for
+          running background
+          updates](../administration/admin_api/background_updates.md#run). Set
+          this to true to return search results containing all known users, even
+          if they do not share a room with the requester.
+        default: false
+      prefer_local_users:
+        type: boolean
+        description: >-
+          Defines whether to prefer local users in search query results. If set
+          to true, local users are more likely to appear above remote users when
+          searching the user directory.
+        default: false
+      exclude_remote_users:
+        type: boolean
+        description: If set to true, the search will only return local users.
+        default: false
+      show_locked_users:
+        type: boolean
+        description: Defines whether to show locked users in search query results.
+        default: false
+    examples:
+      - enabled: false
+        search_all_users: true
+        prefer_local_users: true
+        exclude_remote_users: false
+        show_locked_users: true
+  user_consent:
+    type: object
+    description: >-
+      For detailed instructions on user consent configuration, see
+      [here](../../consent_tracking.md).
+
+
+      Parts of this section are required if enabling the `consent` resource
+      under [`listeners`](#listeners), in particular `template_dir` and
+      `version`.
+    properties:
+      template_dir:
+        type: string
+        description: >-
+          Gives the location of the templates for the HTML forms. This directory
+          should contain one subdirectory per language (e.g. `en`, `fr`), and
+          each language directory should contain the policy document (named as
+          <version>.html) and a success page (success.html).
+      version:
+        type: number
+        description: >-
+          Specifies the "current" version of the policy document. It defines the
+          version to be served by the consent resource if there is no `v`
+          parameter.
+      server_notice_content:
+        type: object
+        description: >-
+          If enabled, will send a user a "Server Notice" asking them to consent
+          to the privacy policy. The [`server_notices` section](#server_notices)
+          must also be configured for this to work. Notices will *not* be sent
+          to guest users unless `send_server_notice_to_guests` is set to true.
+        properties:
+          msgtype:
+            type: string
+            description: Message type of the notice event.
+          body:
+            type: string
+            description: Message template for the server notice event body.
+      send_server_notice_to_guests:
+        type: boolean
+        description: Send server notices to guest users, too.
+        default: false
+      block_events_error:
+        type: ["string", "null"]
+        description: >-
+          If set, will block any attempts to send events until the user consents
+          to the privacy policy. The value of the setting is used as the text of
+          the error.
+        default: null
+      require_at_registration:
+        type: boolean
+        description: >-
+          If enabled, will add a step to the registration process, similar to
+          how CAPTCHA works. Users will be required to accept the policy before
+          their account is created.
+      policy_name:
+        type: string
+        description: Human-readable name of the privacy policy.
+        default: Privacy Policy
+    examples:
+      - template_dir: res/templates/privacy
+        version: 1.0
+        server_notice_content:
+          msgtype: m.text
+          body: >-
+            To continue using this homeserver you must review and agree to the
+            terms and conditions at %(consent_uri)s
+        send_server_notice_to_guests: true
+        block_events_error: >-
+          To continue using this homeserver you must review and agree to the
+          terms and conditions at %(consent_uri)s
+        require_at_registration: false
+        policy_name: Privacy Policy
+  stats:
+    type: object
+    description: >-
+      Settings for local room and user statistics collection. See
+      [here](../../room_and_user_statistics.md) for more.
+    properties:
+      enabled:
+        type: boolean
+        description: >-
+          Set to false to disable room and user statistics. Note that doing so
+          may cause certain features (such as the room directory) not to work
+          correctly.
+        default: true
+    examples:
+      - enabled: false
+  server_notices:
+    type: object
+    description: >-
+      Use this setting to enable a room which can be used to send notices from
+      the server to users. It is a special room which users cannot leave;
+      notices in the room come from a special "notices" user id.
+
+
+      If you use this setting, you *must* define the `system_mxid_localpart`
+      sub-setting, which defines the id of the user which will be used to send
+      the notices.
+
+
+      Note that the name, topic and avatar of existing server notice rooms will
+      only be updated when a new notice event is sent.
+    properties:
+      system_mxid_display_name:
+        type: string
+        description: Display name of the "notices" user.
+        default: Notices
+      system_mxid_avatar_url:
+        type: ["string", "null"]
+        description: Avatar for the "notices" user.
+        default: null
+      room_name:
+        type: string
+        description: Room name of the server notices room.
+        default: Server Notices
+      room_avatar_url:
+        type: ["string", "null"]
+        description: >-
+          Room avatar to use for server notice rooms. If set to the empty string
+          `""`, notice rooms will not be given an avatar.
+
+
+          _Added in Synapse 1.99.0._
+        default: null
+      room_topic:
+        type: ["string", "null"]
+        description: >-
+          Topic to use for server notice rooms. If set to the empty string `""`,
+          notice rooms will not be given a topic. Defaults to the empty string.
+
+
+          _Added in Synapse 1.99.0._
+        default: null
+      auto_join:
+        type: boolean
+        description: >-
+          If true, the user will be automatically joined to the room instead of
+          being invited.
+
+
+          _Added in Synapse 1.98.0._
+        default: false
+    examples:
+      - system_mxid_localpart: notices
+        system_mxid_display_name: Server Notices
+        system_mxid_avatar_url: "mxc://example.com/oumMVlgDnLYFaPVkExemNVVZ"
+        room_name: Server Notices
+        room_avatar_url: "mxc://example.com/oumMVlgDnLYFaPVkExemNVVZ"
+        room_topic: >-
+          Room used by your server admin to notify you of important information
+        auto_join: true
+  enable_room_list_search:
+    type: boolean
+    description: >-
+      Set to false to disable searching the public room list. When disabled,
+      searches of the local and remote room lists, by local and remote users
+      alike, always return an empty list.
+    default: true
+    examples:
+      - false
+  alias_creation_rules:
+    type: ["array", "null"]
+    description: >-
+      The `alias_creation_rules` option allows server admins to prevent unwanted
+      alias creation on this server.
+
+
+      This setting is an optional list of 0 or more rules. By default, no list
+      is provided, meaning that all alias creations are permitted.
+
+
+      Otherwise, requests to create aliases are matched against each rule in
+      order. The first rule that matches decides if the request is allowed or
+      denied. If no rule matches, the request is denied. In particular, this
+      means that configuring an empty list of rules will deny every alias
+      creation request.
+
+
+      Each of the glob patterns is optional, defaulting to `*` ("match
+      anything"). Note that the patterns match against fully qualified IDs, e.g.
+      against `@alice:example.com`, `#room:example.com` and
+      `!abcdefghijk:example.com` instead of `alice`, `room` and `abcdefghijk`.
+
+
+      Each rule is a YAML object containing four fields, each of which is an
+      optional string.
+    items:
+      type: object
+      properties:
+        user_id:
+          type: ["string", "null"]
+          description: Glob pattern that matches against the creator of the alias.
+        alias:
+          type: ["string", "null"]
+          description: Glob pattern that matches against the alias being created.
+        room_id:
+          type: ["string", "null"]
+          description: >-
+            Glob pattern that matches against the room ID the alias is being pointed at.
+        action:
+          type: string
+          enum:
+            - allow
+            - deny
+          description: >-
+            Either `allow` or `deny`. What to do with the request if the rule
+            matches. Defaults to `allow`.
+    default: null
+    examples:
+      - null
+      - - action: allow
+      - []
+      - - action: deny
+      - - user_id: "@bad_user:example.com"
+          action: deny
+        - action: allow
+      - - room_id: "!forbiddenRoom:example.com"
+          action: deny
+        - action: allow
+  room_list_publication_rules:
+    type: ["array", "null"]
+    description: >-
+      The `room_list_publication_rules` option allows server admins to prevent
+      unwanted entries from being published in the public room list.
+
+
+      The format of this option is the same as that for
+      [`alias_creation_rules`](#alias_creation_rules): an optional list of 0 or
+      more rules. By default, no list is provided, meaning that no one may
+      publish to the room list (except server admins).
+
+
+      Otherwise, requests to publish a room are matched against each rule in
+      order. The first rule that matches decides if the request is allowed or
+      denied. If no rule matches, the request is denied. In particular, this
+      means that configuring an empty list of rules will deny every room
+      publication request.
+
+
+ Requests to create a public room (public as in published to the room
+ directory) that violate the configured rules will result in the room being
+ created but not published to the room directory.
+
+
+ Each of the glob patterns is optional, defaulting to `*` ("match
+ anything"). Note that the patterns match against fully qualified IDs, e.g.
+ against `@alice:example.com`, `#room:example.com` and
+ `!abcdefghijk:example.com` instead of `alice`, `room` and `abcdefghijk`.
+
+
+ Each rule is a YAML object containing four fields, each of which is an
+ optional string.
+
+
+ _Changed in Synapse 1.126.0: The default was changed to deny publishing to
+ the room list_
+ items:
+ type: object
+ properties:
+ user_id:
+ type: ["string", "null"]
+ description: Glob pattern that matches against the user publishing the room.
+ alias:
+ type: ["string", "null"]
+ description: >-
+ Glob pattern that matches against one of the published room's aliases.
+
+ - If the room has no aliases, the alias match fails unless `alias`
+ is unspecified or `*`.
+
+ - If the room has exactly one alias, the alias match succeeds if the
+ `alias` pattern matches that alias.
+
+ - If the room has two or more aliases, the alias match succeeds if
+ the pattern matches at least one of the aliases.
+ room_id:
+ type: ["string", "null"]
+ description: >-
+ Glob pattern that matches against the room ID of the room being published.
+ action:
+ type: string
+ enum:
+ - allow
+ - deny
+ description: >-
+ Either `allow` or `deny`. What to do with the request if the rule
+ matches. Defaults to `allow`.
+ default: null
+ examples:
+ - null
+ - - action: deny
+ - []
+ - - action: allow
+ - - user_id: "@bad_user:example.com"
+ action: deny
+ - action: allow
+ - - room_id: "!forbiddenRoom:example.com"
+ action: deny
+ - action: allow
+ - - alias: "#*potato*:example.com"
+ action: deny
+ - action: allow
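Both rule options above share the same first-match-wins evaluation order. As a rough illustration only (not Synapse's actual implementation, and simplified to a single alias rather than the multi-alias semantics described for `room_list_publication_rules`), the matching could be modelled like this, where `evaluate_rules` is a hypothetical helper:

```python
# Illustrative sketch of the first-match-wins semantics of
# alias_creation_rules / room_list_publication_rules. Patterns are globs
# matched against fully qualified IDs; an omitted pattern behaves like "*".
from fnmatch import fnmatchcase
from typing import List, Optional

def evaluate_rules(
    rules: Optional[List[dict]], user_id: str, alias: str, room_id: str
) -> bool:
    if rules is None:
        # No list configured: alias creation is permitted. (For room list
        # publication the post-1.126.0 default is deny instead.)
        return True
    for rule in rules:
        if (
            fnmatchcase(user_id, rule.get("user_id", "*"))
            and fnmatchcase(alias, rule.get("alias", "*"))
            and fnmatchcase(room_id, rule.get("room_id", "*"))
        ):
            # The first matching rule decides the outcome.
            return rule.get("action", "allow") == "allow"
    # No rule matched: the request is denied.
    return False

# The deny-one-user example from above:
rules = [{"user_id": "@bad_user:example.com", "action": "deny"}, {"action": "allow"}]
assert not evaluate_rules(rules, "@bad_user:example.com", "#x:example.com", "!r:example.com")
assert evaluate_rules(rules, "@alice:example.com", "#x:example.com", "!r:example.com")
```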
+ default_power_level_content_override:
+ type: object
+ description: >-
+ The `default_power_level_content_override` option controls the default
+ power levels for rooms.
+
+
+ Useful if you know that your users need special permissions in rooms that
+ they create (e.g. to send particular types of state events without needing
+ an elevated power level). This takes the same shape as the
+ `power_level_content_override` parameter in the /createRoom API, but is
+ applied before that parameter.
+
+
+ Note that each key provided inside a preset (for example `events` in the
+ example below) will overwrite all existing defaults inside that key. So in
+ Example #1, newly-created private_chat rooms will have no rules for any
+ event types except `com.example.foo`.
+
+
+ The default power levels for each preset are:
+
+
+ ```yaml
+
+ "m.room.name": 50
+
+ "m.room.power_levels": 100
+
+ "m.room.history_visibility": 100
+
+ "m.room.canonical_alias": 50
+
+ "m.room.avatar": 50
+
+ "m.room.tombstone": 100
+
+ "m.room.server_acl": 100
+
+ "m.room.encryption": 100
+
+ ```
+
+
+ In Example #2 the default power-levels for a preset are maintained, but
+ the power level for a new key is set.
+ default: {}
+ examples:
+ - private_chat:
+ events:
+ com.example.foo: 0
+ trusted_private_chat: null
+ public_chat: null
+ - private_chat:
+ events:
+ com.example.foo: 0
+ m.room.name: 50
+ m.room.power_levels: 100
+ m.room.history_visibility: 100
+ m.room.canonical_alias: 50
+ m.room.avatar: 50
+ m.room.tombstone: 100
+ m.room.server_acl: 100
+ m.room.encryption: 100
+ trusted_private_chat: null
+ public_chat: null
+ forget_rooms_on_leave:
+ type: boolean
+ description: >-
+ Set to true to automatically forget rooms for users when they leave them,
+ either normally or via a kick or ban.
+ default: false
+ examples:
+ - true
+ exclude_rooms_from_sync:
+ type: array
+ description: >-
+ A list of rooms to exclude from sync responses. This is useful for server
+ administrators wishing to group users into a room without these users
+ being able to see it from their client.
+ items:
+ type: string
+ default: []
+ examples:
+ - - "!foo:example.com"
+ opentracing:
+ type: object
+ description: >-
+ These settings enable and configure opentracing, which implements
+ distributed tracing. This allows you to observe the causal chain of
+ events across servers, including requests, key lookups and so on, for any
+ server running Synapse or any other service which supports opentracing
+ (specifically those implemented with Jaeger).
+ properties:
+ enabled:
+ type: boolean
+ description: Whether tracing is enabled. Set to true to enable.
+ default: false
+ homeserver_whitelist:
+ type: array
+ description: >-
+ The list of homeservers with which we wish to send and receive span
+ contexts and span baggage. See [here](../../opentracing.md) for more.
+ This is a list of regexes which are matched against the `server_name`
+ of the homeserver. If the list is empty, no servers are matched.
+ items:
+ type: string
+ default: []
+ force_tracing_for_users:
+ type: array
+ description: >-
+ A list of the matrix IDs of users whose requests will always be
+ traced, even if the tracing system would otherwise drop the traces due
+ to probabilistic sampling.
+ items:
+ type: string
+ default: []
+ jaeger_config:
+ type: object
+ description: >-
+ Jaeger can be configured to sample traces at different rates. All
+ configuration options provided by Jaeger can be set here. Jaeger's
+ configuration is mostly related to trace sampling, which is documented
+ [here](https://www.jaegertracing.io/docs/latest/sampling/).
+ default: {}
+ examples:
+ - enabled: true
+ homeserver_whitelist:
+ - ".*"
+ force_tracing_for_users:
+ - "@user1:server_name"
+ - "@user2:server_name"
+ jaeger_config:
+ sampler:
+ type: const
+ param: 1
+ logging: false
+ worker_replication_secret:
+ type: ["string", "null"]
+ description: >-
+ A shared secret used by the replication APIs on the main process to
+ authenticate HTTP requests from workers.
+
+
+ If unset or null, traffic between the workers and the main process is not
+ authenticated.
+
+
+ Replacing an existing `worker_replication_secret` with a new one will
+ break communication with all workers that have not yet updated their
+ secret.
+ default: null
+ examples:
+ - secret_secret
+ worker_replication_secret_path:
+ type: ["string", "null"]
+ description: >-
+ An alternative to
+ [`worker_replication_secret`](#worker_replication_secret): allows the
+ secret to be specified in an external file.
+
+
+ The file should be a plain text file, containing only the secret. Synapse
+ reads the secret from the given file once at startup.
+
+
+ _Added in Synapse 1.126.0._
+ default: null
+ examples:
+ - /path/to/secrets/file
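A minimal sketch of the "read the secret from a file once at startup" pattern that `worker_replication_secret_path` describes (and that Redis's `password_path` below reuses). Whether surrounding whitespace is stripped is an assumption of this sketch, so keep the file to a single line containing only the secret:

```python
from pathlib import Path

def read_secret_from_path(path: str) -> str:
    # The file is expected to contain only the secret. Stripping the
    # trailing newline is an assumption of this sketch, not a documented
    # guarantee of Synapse's loader.
    return Path(path).read_text(encoding="utf-8").strip()

worker_replication_secret = read_secret_from_path("/path/to/secrets/file")
```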
+ start_pushers:
+ type: boolean
+ description: >-
+ Unnecessary to set if using [`pusher_instances`](#pusher_instances) with
+ [`generic_workers`](../../workers.md#synapseappgeneric_worker).
+
+
+ Controls sending of push notifications on the main process. Set to `false`
+ if using a [pusher worker](../../workers.md#synapseapppusher).
+ default: true
+ examples:
+ - false
+ pusher_instances:
+ type: array
+ description: >-
+ It is possible to scale the processes that handle sending push
+ notifications to [sygnal](https://github.com/matrix-org/sygnal) and email
+ by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker)
+ and adding its [`worker_name`](#worker_name) to the `pusher_instances`
+ list. Doing so will remove handling of this function from the main
+ process. Multiple workers can be added to this list, in which case the
+ work is balanced across them. Ensure the main process and all pusher
+ workers are restarted after changing this option.
+ items:
+ type: string
+ default: []
+ examples:
+ - - pusher_worker1
+ - - pusher_worker1
+ - pusher_worker2
+ send_federation:
+ type: boolean
+ description: >-
+ Unnecessary to set if using
+ [`federation_sender_instances`](#federation_sender_instances) with
+ [`generic_workers`](../../workers.md#synapseappgeneric_worker).
+
+
+ Controls sending of outbound federation transactions on the main process.
+ Set to `false` if using a [federation sender
+ worker](../../workers.md#synapseappfederation_sender).
+ default: true
+ examples:
+ - false
+ federation_sender_instances:
+ type: array
+ description: >-
+ It is possible to scale the processes that handle sending outbound
+ federation requests by running a
+ [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding
+ its [`worker_name`](#worker_name) to the `federation_sender_instances`
+ list. Doing so will remove handling of this function from the main
+ process. Multiple workers can be added to this list, in which case the
+ work is balanced across them.
+
+
+ The way that the load balancing works is that any outbound federation
+ request will be assigned to a federation sender worker based on the hash
+ of the destination server name. This means that all requests being sent to
+ the same destination will be processed by the same worker instance.
+ Multiple `federation_sender_instances` are useful if the homeserver
+ federates with many other servers.
+
+
+ This configuration setting must be shared between all workers handling
+ federation sending, and if changed, all federation sender workers must be
+ stopped at the same time and then started, to ensure that all instances
+ are running with the same config (otherwise events may be dropped).
+ items:
+ type: string
+ default: []
+ examples:
+ - - federation_sender1
+ - - federation_sender1
+ - federation_sender2
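To illustrate the destination-hashing behaviour described above, the sketch below pins each destination to one sender deterministically. `pick_sender` is a hypothetical helper and the concrete hash function is an assumption; Synapse's actual choice is an implementation detail and may differ:

```python
# Illustrative only: hashing a destination pins it to one federation sender,
# so all traffic for a given remote server flows through the same worker.
import hashlib
from typing import List

def pick_sender(destination: str, federation_sender_instances: List[str]) -> str:
    digest = hashlib.sha256(destination.encode("utf-8")).digest()
    index = int.from_bytes(digest[:8], "big") % len(federation_sender_instances)
    return federation_sender_instances[index]

instances = ["federation_sender1", "federation_sender2"]
# Every request for matrix.org is handled by the same worker:
assert pick_sender("matrix.org", instances) == pick_sender("matrix.org", instances)
```

This is also why all senders must share the same list: if two workers disagreed about the list, they would disagree about which of them owns a destination.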
+ instance_map:
+ type: object
+ description: >-
+ When using workers, this should be a map from [`worker_name`](#worker_name)
+ to the HTTP replication listener of the worker, if configured, and to the
+ main process. Each worker declared under
+ [`stream_writers`](../../workers.md#stream-writers) and
+ [`outbound_federation_restricted_to`](#outbound_federation_restricted_to)
+ needs an HTTP replication listener, and that listener should be included in
+ the `instance_map`. The main process also needs an entry in the
+ `instance_map`, and it should be listed under `main` **if even one other
+ worker exists**. Ensure the port matches what is declared inside the
+ `listener` block for a `replication` listener.
+ additionalProperties:
+ type: object
+ default: {}
+ examples:
+ - main:
+ host: localhost
+ port: 8030
+ worker1:
+ host: localhost
+ port: 8034
+ other:
+ host: localhost
+ port: 8035
+ tls: true
+ - main:
+ path: /run/synapse/main_replication.sock
+ worker1:
+ path: /run/synapse/worker1_replication.sock
+ stream_writers:
+ type: object
+ description: >-
+ Experimental: When using workers you can define which workers should
+ handle writing to streams such as event persistence and typing
+ notifications. Any worker specified here must also be in the
+ [`instance_map`](#instance_map).
+
+
+ See the list of available streams in the [worker
+ documentation](../../workers.md#stream-writers).
+ properties:
+ events:
+ type: string
+ description: Name of a worker assigned to the `events` stream.
+ typing:
+ type: string
+ description: Name of a worker assigned to the `typing` stream.
+ to_device:
+ type: string
+ description: Name of a worker assigned to the `to_device` stream.
+ account_data:
+ type: string
+ description: Name of a worker assigned to the `account_data` stream.
+ receipts:
+ type: string
+ description: Name of a worker assigned to the `receipts` stream.
+ presence:
+ type: string
+ description: Name of a worker assigned to the `presence` stream.
+ push_rules:
+ type: string
+ description: Name of a worker assigned to the `push_rules` stream.
+ default: {}
+ examples:
+ - events: worker1
+ typing: worker1
+ outbound_federation_restricted_to:
+ type: array
+ description: >-
+ When using workers, you can restrict outbound federation traffic to only
+ go through a specific subset of workers. Any worker specified here must
+ also be in the [`instance_map`](#instance_map).
+ [`worker_replication_secret`](#worker_replication_secret) must also be
+ configured to authorize inter-worker communication.
+
+
+ Also see the [worker
+ documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers)
+ for more info.
+
+
+ _Added in Synapse 1.89.0._
+ items:
+ type: string
+ default: []
+ examples:
+ - - federation_sender1
+ - federation_sender2
+ run_background_tasks_on:
+ type: ["string", "null"]
+ description: >-
+ The [worker](../../workers.md#background-tasks) that is used to run
+ background tasks (e.g. cleaning up expired data). If not provided, this
+ defaults to the main process.
+ default: null
+ examples:
+ - worker1
+ update_user_directory_from_worker:
+ type: ["string", "null"]
+ description: >-
+ The [worker](../../workers.md#updating-the-user-directory) that is used to
+ update the user directory. If not provided, this defaults to the main
+ process.
+
+
+ _Added in Synapse 1.59.0._
+ default: null
+ examples:
+ - worker1
+ notify_appservices_from_worker:
+ type: ["string", "null"]
+ description: >-
+ The [worker](../../workers.md#notifying-application-services) that is used
+ to send output traffic to Application Services. If not provided, this
+ defaults to the main process.
+
+
+ _Added in Synapse 1.59.0._
+ default: null
+ examples:
+ - worker1
+ media_instance_running_background_jobs:
+ type: ["string", "null"]
+ description: >-
+ The [worker](../../workers.md#synapseappmedia_repository) that is used to
+ run background tasks for the media repository. If running multiple media
+ repositories, you must configure a single instance to run the background
+ tasks.
If not provided, this defaults to the main process or your single
+ `media_repository` worker.
+
+
+ _Added in Synapse 1.16.0._
+ default: null
+ examples:
+ - worker1
+ redis:
+ type: object
+ description: >-
+ Configuration for Redis when using workers. This *must* be enabled when
+ using workers.
+
+
+ _Added in Synapse 1.78.0._
+
+
+ _Changed in Synapse 1.84.0: Added use\_tls, certificate\_file,
+ private\_key\_file, ca\_file and ca\_path attributes_
+
+
+ _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
+
+
+ _Changed in Synapse 1.116.0: Added password\_path_
+ properties:
+ enabled:
+ type: boolean
+ description: Whether to use Redis support.
+ default: false
+ host:
+ type: string
+ description: Optional host to use to connect to Redis.
+ default: localhost
+ port:
+ type: integer
+ description: Optional port to use to connect to Redis.
+ default: 6379
+ path:
+ type: string
+ description: >-
+ The full path to a local Unix socket file. **If this is used, `host`
+ and `port` are ignored.**
+ default: /tmp/redis.sock
+ password:
+ type: ["string", "null"]
+ description: Optional password if configured on the Redis instance.
+ default: null
+ password_path:
+ type: ["string", "null"]
+ description: >-
+ Alternative to `password`, reading the password from an external file.
+ The file should be a plain text file, containing only the password.
+ Synapse reads the password from the given file once at startup.
+ default: null
+ dbid:
+ type: ["string", "null"]
+ description: >-
+ Optional Redis dbid, if you need to connect to a specific Redis
+ logical database.
+ default: null
+ use_tls:
+ type: boolean
+ description: Whether to use a TLS connection.
+ default: false
+ certificate_file:
+ type: ["string", "null"]
+ description: Optional path to the certificate file.
+ default: null
+ private_key_file:
+ type: ["string", "null"]
+ description: Optional path to the private key file.
+ default: null
+ ca_file:
+ type: ["string", "null"]
+ description: >-
+ Optional path to the CA certificate file. Use either this or `ca_path`.
+ default: null
+ ca_path:
+ type: ["string", "null"]
+ description: >-
+ Optional path to the folder containing the CA certificate file. Use
+ either this or `ca_file`.
+ default: null
+ examples:
+ - enabled: true
+ host: localhost
+ port: 6379
+ password_path: "<path_to_the_password_file>"
+ dbid: "<dbid>"
+ worker_app:
+ type: string
+ description: >-
+ The type of worker. The currently available worker applications are listed
+ in the [worker documentation](../../workers.md#available-worker-applications).
+
+
+ The most common worker is the
+ [`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).
+ examples:
+ - synapse.app.generic_worker
+ worker_name:
+ type: string
+ description: >-
+ A unique name for the worker. The worker needs a name so that it can be
+ addressed in further parameters and identified in log files. We strongly
+ recommend giving each worker a unique `worker_name`.
+ examples:
+ - generic_worker1
+ worker_listeners:
+ type: array
+ description: >-
+ A worker can handle HTTP requests. To do so, a `worker_listeners` option
+ must be declared, in the same way as the [`listeners` option](#listeners)
+ in the shared config.
+
+
+ Workers declared in [`stream_writers`](#stream_writers) and
+ [`instance_map`](#instance_map) will need to include a `replication`
+ listener here, in order to accept internal HTTP requests from other
+ workers.
+
+
+ Example #2 uses UNIX sockets with a `replication` listener.
+ default: []
+ examples:
+ - - type: http
+ port: 8083
+ resources:
+ - names:
+ - client
+ - federation
+ - - type: http
+ path: /run/synapse/worker_replication.sock
+ resources:
+ - names:
+ - replication
+ - type: http
+ path: /run/synapse/worker_public.sock
+ resources:
+ - names:
+ - client
+ - federation
+ worker_manhole:
+ type: ["integer", "null"]
+ description: >-
+ A worker may have a listener for [`manhole`](../../manhole.md). It allows
+ server administrators to access a Python shell on the worker.
+
+
+ The example below is a short form for
+
+ ```yaml
+
+ worker_listeners:
+ - port: 9000
+ bind_addresses: ['127.0.0.1']
+ type: manhole
+ ```
+
+
+ It also needs an additional [`manhole_settings`](#manhole_settings)
+ configuration.
+ default: null
+ examples:
+ - 9000
+ worker_daemonize:
+ type: boolean
+ description: >-
+ Specifies whether the worker should be started as a daemon process. If
+ Synapse is being managed by [systemd](../../systemd-with-workers/), this
+ option must be omitted or set to `false`.
+ default: false
+ examples:
+ - true
+ worker_pid_file:
+ type: ["string", "null"]
+ description: >-
+ When running a worker as a daemon, we need a place to store the
+ [PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
+ This option defines the location of that "pid file".
+
+
+ This option is required if `worker_daemonize` is `true` and ignored
+ otherwise.
+
+
+ See also the [`pid_file`](#pid_file) option for the main Synapse
+ process.
+ default: null
+ examples:
+ - DATADIR/generic_worker1.pid
+ worker_log_config:
+ type: ["string", "null"]
+ description: >-
+ This option specifies a YAML Python logging config file as described
+ [here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
+ See also the [`log_config`](#log_config) option for the main
+ Synapse process.
+ default: null
+ examples:
+ - /etc/matrix-synapse/generic-worker-log.yaml
+ background_updates:
+ type: object
+ description: >-
+ Background updates are database updates that are run in the background in
+ batches. The duration, minimum batch size, default batch size, whether to
+ sleep between batches and, if so, how long to sleep can all be configured.
+ This is helpful for speeding up or slowing down the updates.
+ properties:
+ background_update_duration_ms:
+ type: integer
+ description: How long in milliseconds to run a batch of background updates for.
+ default: 100
+ sleep_enabled:
+ type: boolean
+ description: Whether to sleep between updates.
+ default: true
+ sleep_duration_ms:
+ type: integer
+ description: If sleeping between updates, how long in milliseconds to sleep for.
+ default: 1000
+ min_batch_size:
+ type: integer
+ description: >-
+ Minimum size a batch of background updates can be. Must be greater than 0.
+ default: 1
+ default_batch_size:
+ type: integer
+ description: >-
+ The batch size to use for the first iteration of a new background update.
+ default: 100
+ examples:
+ - background_update_duration_ms: 500
+ sleep_enabled: false
+ sleep_duration_ms: 300
+ min_batch_size: 10
+ default_batch_size: 50
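How these four knobs could interact, as a simplified model only: the batch-size adaptation formula below is an assumption, `process_batch` is a hypothetical callable, and Synapse's real scheduler, which lives in the database layer, is more involved than this sketch:

```python
import time

def run_background_update(
    process_batch,               # hypothetical: takes a batch size, returns items done
    duration_ms: int = 100,
    sleep_enabled: bool = True,
    sleep_duration_ms: int = 1000,
    min_batch_size: int = 1,
    default_batch_size: int = 100,
) -> None:
    batch_size = default_batch_size
    while True:
        start = time.monotonic()
        done = process_batch(batch_size)
        if done == 0:
            break  # the update has finished
        elapsed_ms = (time.monotonic() - start) * 1000 or 1
        # Assumption: re-target the batch size so the next batch takes
        # roughly `duration_ms` to run, never dropping below the minimum.
        batch_size = max(min_batch_size, int(done * duration_ms / elapsed_ms))
        if sleep_enabled:
            time.sleep(sleep_duration_ms / 1000)
```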
+ auto_accept_invites:
+ type: object
+ description: >-
+ Automatically accepting invites controls whether users are presented with
+ an invite request or are instead automatically joined to a room when
+ receiving an invite. Set the `enabled` sub-option to true to enable
+ auto-accepting invites.
+
+
+ NOTE: Care should be taken not to enable this setting if the
+ `synapse_auto_accept_invite` module is installed and enabled. The two
+ implementations will compete to perform the same task, which may result in
+ undesired behaviour. For example, multiple join events could be generated
+ from a single invite.
+ properties:
+ enabled:
+ type: boolean
+ description: Whether to run the auto-accept invites logic.
+ default: false
+ only_for_direct_messages:
+ type: boolean
+ description: >-
+ Whether invites should be automatically accepted for all room types,
+ or only for direct messages.
+ default: false
+ only_from_local_users:
+ type: boolean
+ description: >-
+ Whether to only automatically accept invites from users on this homeserver.
+ default: false
+ worker_to_run_on:
+ type: ["string", "null"]
+ description: >-
+ Which worker to run the auto-accept invites logic on. This must match
+ the worker's `worker_name`. If not set or `null`, invites will be
+ accepted on the main process.
+ default: null
+ examples:
+ - enabled: true
+ only_for_direct_messages: true
+ only_from_local_users: true
+ worker_to_run_on: worker_1
+$defs:
+ bytes:
+ type: ["string", "integer"]
+ io.element.type_name: byte size
+ duration:
+ type: ["string", "integer"]
+ io.element.type_name: duration
+ size:
+ type: ["string", "integer"]
+ io.element.type_name: size
+ rc:
+ type: object
+ properties:
+ per_second:
+ type: number
+ description: Maximum number of requests a client can send per second.
+ burst_count:
+ type: number
+ description: >-
+ Maximum number of requests a client can send before being throttled.
+ database:
+ type: object
+ description: >-
+ The `database` setting defines the database that Synapse uses to store all
+ of its data.
+
+
+ For more information on using Synapse with Postgres, see
+ [here](../../postgres.md).
+ properties:
+ name:
+ type: string
+ enum:
+ - sqlite3
+ - psycopg2
+ description: >-
+ This option specifies the database engine to use: either `sqlite3`
+ (for SQLite) or `psycopg2` (for PostgreSQL). If no name is specified,
+ Synapse will default to SQLite.
+ default: sqlite3
+ txn_limit:
+ type: integer
+ description: >-
+ Gives the maximum number of transactions to run per connection before
+ reconnecting. 0 means no limit.
+ default: 0
+ allow_unsafe_locale:
+ type: boolean
+ description: >-
+ This option is specific to Postgres. By default, Synapse will refuse
+ to start if the Postgres database is set to a non-C locale. You can
+ override this behavior (which is *not* recommended) by setting
+ `allow_unsafe_locale` to true. Note that doing so may corrupt your
+ database. You can find more information
+ [here](../../postgres.md#fixing-incorrect-collate-or-ctype) and
+ [here](https://wiki.postgresql.org/wiki/Locale_data_changes).
+ default: false
+ args:
+ type: object
+ description: >-
+ Gives options which are passed through to the database engine, except
+ for options starting with `cp_`, which are used to configure the
+ Twisted connection pool. For a reference to valid arguments, see:
+
+ * for
+ [sqlite](https://docs.python.org/3/library/sqlite3.html#sqlite3.connect)
+
+ * for
+ [postgres](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS)
+
+ * for [the connection
+ pool](https://docs.twistedmatrix.com/en/stable/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__)
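Roughly where `args` ends up, as a hedged sketch rather than Synapse's actual wiring: Twisted's `ConnectionPool` consumes the `cp_`-prefixed keys itself and forwards the rest to the database driver (here psycopg2):

```python
from twisted.enterprise import adbapi

# The values below are illustrative. cp_min/cp_max size the Twisted
# connection pool; the remaining keyword arguments go to psycopg2.connect().
pool = adbapi.ConnectionPool(
    "psycopg2",
    dbname="synapse",
    user="synapse_user",
    host="localhost",
    cp_min=5,
    cp_max=10,
)
```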
diff --git a/schema/v1/Do not edit files in this folder b/schema/v1/Do not edit files in this folder
new file mode 100644
index 0000000000..c95c3bfed0
--- /dev/null
+++ b/schema/v1/Do not edit files in this folder
@@ -0,0 +1,2 @@ +If you want to update the meta schema, copy this folder and increase its version +number instead. diff --git a/schema/v1/meta.schema.json b/schema/v1/meta.schema.json new file mode 100644
index 0000000000..0c2c46f1a9 --- /dev/null +++ b/schema/v1/meta.schema.json
@@ -0,0 +1,29 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true, + "https://element-hq.github.io/synapse/latest/schema/v1/vocab/documentation": false + }, + "$ref": "https://json-schema.org/draft/2020-12/schema", + "properties": { + "io.element.type_name": { + "type": "string", + "description": "Human-readable type of a schema that is displayed instead of the standard JSON Schema types like `object` or `integer`. In case the JSON Schema type contains `null`, this information should be presented alongside the human-readable type name.", + "examples": ["duration", "byte size"] + }, + "io.element.post_description": { + "type": "string", + "description": "Additional description of a schema, better suited to be placed less prominently in the generated documentation, e.g., at the end of a section after listings of items and properties.", + "examples": [ + "### Advanced uses\n\nThe spent coffee grounds can be added to compost for improving soil and growing plants." + ] + } + } +} diff --git a/schema/v1/vocab/documentation.html b/schema/v1/vocab/documentation.html new file mode 100644
index 0000000000..6a45f4beb3 --- /dev/null +++ b/schema/v1/vocab/documentation.html
@@ -0,0 +1,11 @@ +<!DOCTYPE html> +<html lang="en"> + <head> + <meta http-equiv="refresh" content="0; URL=../meta.schema.json"> + <meta charset="UTF-8"> + <title>Redirecting to ../meta.schema.json…</title> + </head> + <body> + <p>Redirecting to <a href="../meta.schema.json">../meta.schema.json</a>…</p> + </body> +</html> diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py
index de2a134544..6ee695b2ba 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py
@@ -28,12 +28,11 @@ from typing import Collection, Optional, Sequence, Set # example) DISTS = ( "debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05) - "debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24) - "debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24) - "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14) + "debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24) + "debian:sid", # (rolling distro, no EOL) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) - "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24) - "ubuntu:mantic", # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24) + "ubuntu:noble", # 24.04 LTS (EOL 2029-06) + "ubuntu:oracular", # 24.10 (EOL 2025-07) "debian:trixie", # (EOL not specified yet) ) diff --git a/scripts-dev/check_pydantic_models.py b/scripts-dev/check_pydantic_models.py
index 9e67375b6a..5eb1f0a9df 100755 --- a/scripts-dev/check_pydantic_models.py +++ b/scripts-dev/check_pydantic_models.py
@@ -31,6 +31,7 @@ Pydantic does not yet offer a strict mode, but it is planned for pydantic v2. Se until then, this script is a best effort to stop us from introducing type coersion bugs (like the infamous stringy power levels fixed in room version 10). """ + import argparse import contextlib import functools @@ -44,7 +45,6 @@ import traceback import unittest.mock from contextlib import contextmanager from typing import ( - TYPE_CHECKING, Any, Callable, Dict, @@ -56,30 +56,17 @@ from typing import ( ) from parameterized import parameterized - -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import ( - BaseModel as PydanticBaseModel, - conbytes, - confloat, - conint, - constr, - ) - from pydantic.v1.typing import get_args -else: - from pydantic import ( - BaseModel as PydanticBaseModel, - conbytes, - confloat, - conint, - constr, - ) - from pydantic.typing import get_args - from typing_extensions import ParamSpec +from synapse._pydantic_compat import ( + BaseModel as PydanticBaseModel, + conbytes, + confloat, + conint, + constr, + get_args, +) + logger = logging.getLogger(__name__) CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: List[Callable] = [ @@ -182,22 +169,16 @@ def monkeypatch_pydantic() -> Generator[None, None, None]: # Most Synapse code ought to import the patched objects directly from # `pydantic`. But we also patch their containing modules `pydantic.main` and # `pydantic.types` for completeness. - patch_basemodel1 = unittest.mock.patch( - "pydantic.BaseModel", new=PatchedBaseModel - ) - patch_basemodel2 = unittest.mock.patch( - "pydantic.main.BaseModel", new=PatchedBaseModel + patch_basemodel = unittest.mock.patch( + "synapse._pydantic_compat.BaseModel", new=PatchedBaseModel ) - patches.enter_context(patch_basemodel1) - patches.enter_context(patch_basemodel2) + patches.enter_context(patch_basemodel) for factory in CONSTRAINED_TYPE_FACTORIES_WITH_STRICT_FLAG: wrapper: Callable = make_wrapper(factory) - patch1 = unittest.mock.patch(f"pydantic.{factory.__name__}", new=wrapper) - patch2 = unittest.mock.patch( - f"pydantic.types.{factory.__name__}", new=wrapper + patch = unittest.mock.patch( + f"synapse._pydantic_compat.{factory.__name__}", new=wrapper ) - patches.enter_context(patch1) - patches.enter_context(patch2) + patches.enter_context(patch) yield diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py
index 467be96fdf..454784c3ae 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py
@@ -1,6 +1,8 @@ #!/usr/bin/env python3 # Check that no schema deltas have been added to the wrong version. +# +# Also checks that schema deltas do not try and create or drop indices. import re from typing import Any, Dict, List @@ -9,6 +11,13 @@ import click import git SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") +INDEX_CREATION_REGEX = re.compile(r"CREATE .*INDEX .*ON ([a-z_]+)", flags=re.IGNORECASE) +INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_]+)", flags=re.IGNORECASE) +TABLE_CREATION_REGEX = re.compile(r"CREATE .*TABLE ([a-z_]+)", flags=re.IGNORECASE) + +# The base branch we want to check against. We use the main development branch +# on the assumption that is what we are developing against. +DEVELOP_BRANCH = "develop" @click.command() @@ -20,6 +29,9 @@ SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") help="Always output ANSI colours", ) def main(force_colors: bool) -> None: + # Return code. Set to non-zero when we encounter an error + return_code = 0 + click.secho( "+++ Checking schema deltas are in the right folder", fg="green", @@ -30,17 +42,17 @@ def main(force_colors: bool) -> None: click.secho("Updating repo...") repo = git.Repo() - repo.remote().fetch() + repo.remote().fetch(refspec=DEVELOP_BRANCH) click.secho("Getting current schema version...") - r = repo.git.show("origin/develop:synapse/storage/schema/__init__.py") + r = repo.git.show(f"origin/{DEVELOP_BRANCH}:synapse/storage/schema/__init__.py") locals: Dict[str, Any] = {} exec(r, locals) current_schema_version = locals["SCHEMA_VERSION"] - diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None) + diffs: List[git.Diff] = repo.remote().refs[DEVELOP_BRANCH].commit.diff(None) # Get the schema version of the local file to check against current schema on develop with open("synapse/storage/schema/__init__.py") as file: @@ -53,7 +65,7 @@ def main(force_colors: bool) -> None: # local schema version must be +/-1 the current schema version on develop if abs(local_schema_version - current_schema_version) != 1: click.secho( - "The proposed schema version has diverged more than one version from develop, please fix!", + f"The proposed schema version has diverged more than one version from {DEVELOP_BRANCH}, please fix!", fg="red", bold=True, color=force_colors, @@ -67,21 +79,28 @@ def main(force_colors: bool) -> None: click.secho(f"Current schema version: {current_schema_version}") seen_deltas = False - bad_files = [] + bad_delta_files = [] + changed_delta_files = [] for diff in diffs: - if not diff.new_file or diff.b_path is None: + if diff.b_path is None: + # We don't lint deleted files. 
continue match = SCHEMA_FILE_REGEX.match(diff.b_path) if not match: continue + changed_delta_files.append(diff.b_path) + + if not diff.new_file: + continue + seen_deltas = True _, delta_version, _ = match.groups() if delta_version != str(current_schema_version): - bad_files.append(diff.b_path) + bad_delta_files.append(diff.b_path) if not seen_deltas: click.secho( @@ -92,41 +111,91 @@ def main(force_colors: bool) -> None: ) return - if not bad_files: + if bad_delta_files: + bad_delta_files.sort() + click.secho( - f"All deltas are in the correct folder: {current_schema_version}!", - fg="green", + "Found deltas in the wrong folder!", + fg="red", bold=True, color=force_colors, ) - return - bad_files.sort() - - click.secho( - "Found deltas in the wrong folder!", - fg="red", - bold=True, - color=force_colors, - ) + for f in bad_delta_files: + click.secho( + f"\t{f}", + fg="red", + bold=True, + color=force_colors, + ) - for f in bad_files: + click.secho() click.secho( - f"\t{f}", + f"Please move these files to delta/{current_schema_version}/", fg="red", bold=True, color=force_colors, ) - click.secho() - click.secho( - f"Please move these files to delta/{current_schema_version}/", - fg="red", - bold=True, - color=force_colors, - ) + else: + click.secho( + f"All deltas are in the correct folder: {current_schema_version}!", + fg="green", + bold=True, + color=force_colors, + ) - click.get_current_context().exit(1) + # Make sure we process them in order. This sort works because deltas are numbered + # and delta files are also numbered in order. + changed_delta_files.sort() + + # Now check that we're not trying to create or drop indices. If we want to + # do that they should be in background updates. The exception is when we + # create indices on tables we've just created. + created_tables = set() + for delta_file in changed_delta_files: + with open(delta_file) as fd: + delta_lines = fd.readlines() + + for line in delta_lines: + # Strip SQL comments + line = line.split("--", maxsplit=1)[0] + + # Check and track any tables we create + match = TABLE_CREATION_REGEX.search(line) + if match: + table_name = match.group(1) + created_tables.add(table_name) + + # Check for dropping indices, these are always banned + match = INDEX_DELETION_REGEX.search(line) + if match: + clause = match.group() + + click.secho( + f"Found delta with index deletion: '{clause}' in {delta_file}\nThese should be in background updates.", + fg="red", + bold=True, + color=force_colors, + ) + return_code = 1 + + # Check for index creation, which is only allowed for tables we've + # created. + match = INDEX_CREATION_REGEX.search(line) + if match: + clause = match.group() + table_name = match.group(1) + if table_name not in created_tables: + click.secho( + f"Found delta with index creation: '{clause}' in {delta_file}\nThese should be in background updates.", + fg="red", + bold=True, + color=force_colors, + ) + return_code = 1 + + click.get_current_context().exit(return_code) if __name__ == "__main__": diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 4ad547bc7e..08b500ecd6 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh
@@ -195,6 +195,10 @@ if [ -z "$skip_docker_build" ]; then # Build the unified Complement image (from the worker Synapse image we just built). echo_if_github "::group::Build Docker image: complement/Dockerfile" $CONTAINER_RUNTIME build -t complement-synapse \ + `# This is the tag we end up pushing to the registry (see` \ + `# .github/workflows/push_complement_image.yml) so let's just label it now` \ + `# so people can reference it by the same name locally.` \ + -t ghcr.io/element-hq/synapse/complement-synapse \ -f "docker/complement/Dockerfile" "docker/complement" echo_if_github "::endgroup::" @@ -220,9 +224,12 @@ test_packages=( ./tests/msc3874 ./tests/msc3890 ./tests/msc3391 + ./tests/msc3757 ./tests/msc3930 ./tests/msc3902 ./tests/msc3967 + ./tests/msc4140 + ./tests/msc4155 ) # Enable dirty runs, so tests will reuse the same container where possible. diff --git a/scripts-dev/gen_config_documentation.py b/scripts-dev/gen_config_documentation.py new file mode 100755
index 0000000000..a169fd323f --- /dev/null +++ b/scripts-dev/gen_config_documentation.py
@@ -0,0 +1,494 @@ +#!/usr/bin/env python3 +"""Generate Synapse documentation from JSON Schema file.""" + +import json +import re +import sys +from typing import Any, Optional + +import yaml + +HEADER = """<!-- Document auto-generated by scripts-dev/gen_config_documentation.py --> + +# Configuring Synapse + +This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified +through the many configuration settings documented here — each config option is explained, +including what the default is, how to change the default and what sort of behaviour the setting governs. +Also included is an example configuration for each setting. If you don't want to spend a lot of time +thinking about options, the config as generated sets sensible defaults for all values. Do note however that the +database defaults to SQLite, which is not recommended for production usage. You can read more on this subject +[here](../../setup/installation.md#using-postgresql). + +## Config Conventions + +Configuration options that take a time period can be set using a number +followed by a letter. Letters have the following meanings: + +* `s` = second +* `m` = minute +* `h` = hour +* `d` = day +* `w` = week +* `y` = year + +For example, setting `redaction_retention_period: 5m` would remove redacted +messages from the database after 5 minutes, rather than 5 months. + +In addition, configuration options referring to size use the following suffixes: + +* `K` = KiB, or 1024 bytes +* `M` = MiB, or 1,048,576 bytes +* `G` = GiB, or 1,073,741,824 bytes +* `T` = TiB, or 1,099,511,627,776 bytes + +For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes +for a user avatar. + +## Config Validation + +The configuration file can be validated with the following command: +```bash +python -m synapse.config read <config key to print> -c <path to config> +``` + +To validate the entire file, omit `read <config key to print>`: +```bash +python -m synapse.config -c <path to config> +``` + +To see how to set other options, check the help reference: +```bash +python -m synapse.config --help +``` + +### YAML +The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules +apply if you want your config file to be read properly. A few helpful things to know: +* `#` before any option in the config will comment out that setting and either a default (if available) will + be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and + applied, but in example #2 the setting will not be read and a default will be applied. + + Example #1: + ```yaml + pid_file: DATADIR/homeserver.pid + ``` + Example #2: + ```yaml + #pid_file: DATADIR/homeserver.pid + ``` +* Indentation matters! The indentation before a setting + will determine whether a given setting is read as part of another + setting, or considered on its own. Thus, in example #1, the `enabled` setting + is read as a sub-option of the `presence` setting, and will be properly applied. + + However, the lack of indentation before the `enabled` setting in example #2 means + that when reading the config, Synapse will consider both `presence` and `enabled` as + different settings. In this case, `presence` has no value, and thus a default applied, and `enabled` + is an option that Synapse doesn't recognize and thus ignores. 
+ + Example #1: + ```yaml + presence: + enabled: false + ``` + Example #2: + ```yaml + presence: + enabled: false + ``` + In this manual, all top-level settings (ones with no indentation) are identified + at the beginning of their section (i.e. "### `example_setting`") and + the sub-options, if any, are identified and listed in the body of the section. + In addition, each setting has an example of its usage, with the proper indentation + shown. +""" +SECTION_HEADERS = { + "modules": { + "title": "Modules", + "description": ( + "Server admins can expand Synapse's functionality with external " + "modules.\n\n" + "See [here](../../modules/index.md) for more documentation on how " + "to configure or create custom modules for Synapse." + ), + }, + "server_name": { + "title": "Server", + "description": "Define your homeserver name and other base options.", + }, + "admin_contact": { + "title": "Homeserver blocking", + "description": "Useful options for Synapse admins.", + }, + "tls_certificate_path": { + "title": "TLS", + "description": "Options related to TLS.", + }, + "federation_domain_whitelist": { + "title": "Federation", + "description": "Options related to federation.", + }, + "event_cache_size": { + "title": "Caching", + "description": "Options related to caching.", + }, + "database": { + "title": "Database", + "description": "Config options related to database settings.", + }, + "log_config": { + "title": "Logging", + "description": ("Config options related to logging."), + }, + "rc_message": { + "title": "Ratelimiting", + "description": ( + "Options related to ratelimiting in Synapse.\n\n" + "Each ratelimiting configuration is made of two parameters:\n" + "- `per_second`: number of requests a client can send per second.\n" + "- `burst_count`: number of requests a client can send before " + "being throttled." + ), + }, + "enable_authenticated_media": { + "title": "Media Store", + "description": "Config options related to Synapse's media store.", + }, + "recaptcha_public_key": { + "title": "Captcha", + "description": ( + "See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha." + ), + }, + "turn_uris": { + "title": "TURN", + "description": ("Options related to adding a TURN server to Synapse."), + }, + "enable_registration": { + "title": "Registration", + "description": ( + "Registration can be rate-limited using the parameters in the " + "[Ratelimiting](#ratelimiting) section of this manual." + ), + }, + "session_lifetime": { + "title": "User session management", + "description": ("Config options related to user session management."), + }, + "enable_metrics": { + "title": "Metrics", + "description": ("Config options related to metrics."), + }, + "room_prejoin_state": { + "title": "API Configuration", + "description": ("Config settings related to the client/server API."), + }, + "signing_key_path": { + "title": "Signing Keys", + "description": ("Config options relating to signing keys."), + }, + "push": { + "title": "Push", + "description": ("Configuration settings related to push notifications."), + }, + "encryption_enabled_by_default_for_room_type": { + "title": "Rooms", + "description": ("Config options relating to rooms."), + }, + "opentracing": { + "title": "Opentracing", + "description": ("Configuration options related to Opentracing support."), + }, + "worker_replication_secret": { + "title": "Coordinating workers", + "description": ( + "Configuration options related to workers which belong in the main config file (usually called `homeserver.yaml`). 
A Synapse deployment can scale horizontally by running multiple Synapse processes called _workers_. Incoming requests are distributed between workers to handle higher loads. Some workers are privileged and can accept requests from other workers.\n\n"
+            "As a result, the worker configuration is divided into two parts.\n\n"
+            "1. The first part (in this section of the manual) defines which shardable tasks are delegated to privileged workers. This allows unprivileged workers to make requests to a privileged worker to act on their behalf.\n"
+            "2. [The second part](#individual-worker-configuration) controls the behaviour of individual workers in isolation.\n\n"
+            "For guidance on setting up workers, see the [worker documentation](../../workers.md)."
+        ),
+    },
+    "worker_app": {
+        "title": "Individual worker configuration",
+        "description": (
+            "These options configure an individual worker, in its worker configuration file. They should not be provided when configuring the main process.\n\n"
+            "Note also the configuration above for [coordinating a cluster of workers](#coordinating-workers).\n\n"
+            "For guidance on setting up workers, see the [worker documentation](../../workers.md)."
+        ),
+    },
+    "background_updates": {
+        "title": "Background Updates",
+        "description": ("Configuration settings related to background updates."),
+    },
+    "auto_accept_invites": {
+        "title": "Auto Accept Invites",
+        "description": (
+            "Configuration settings related to automatically accepting invites."
+        ),
+    },
+}
+INDENT = "  "
+
+
+has_error = False
+
+
+def error(text: str) -> None:
+    global has_error
+    print(f"ERROR: {text}", file=sys.stderr)
+    has_error = True
+
+
+def indent(text: str, first_line: bool = True) -> str:
+    """Indents each non-empty line of the given text."""
+    text = re.sub(r"(\n)([^\n])", r"\1" + INDENT + r"\2", text)
+    if first_line:
+        text = re.sub(r"^([^\n])", INDENT + r"\1", text)
+
+    return text
+
+
+def em(s: Optional[str]) -> str:
+    """Add emphasis to text."""
+    return f"*{s}*" if s else ""
+
+
+def a(s: Optional[str], suffix: str = " ") -> str:
+    """Appends a space if the given string is not empty."""
+    return s + suffix if s else ""
+
+
+def p(s: Optional[str], prefix: str = " ") -> str:
+    """Prepend a space if the given string is not empty."""
+    return prefix + s if s else ""
+
+
+def resolve_local_refs(schema: dict) -> dict:
+    """Returns the given schema with local $ref properties replaced by their keywords.
+
+    Crude approximation that will override keywords.
+    """
+    defs = schema["$defs"]
+
+    def replace_ref(d: Any) -> Any:
+        if isinstance(d, dict):
+            the_def = {}
+            if "$ref" in d:
+                # Found a "$ref" key.
+ def_name = d["$ref"].removeprefix("#/$defs/") + del d["$ref"] + the_def = defs[def_name] + + new_dict = {k: replace_ref(v) for k, v in d.items()} + if common_keys := (new_dict.keys() & the_def.keys()) - {"properties"}: + print( + f"WARN: '{def_name}' overrides keys '{common_keys}'", + file=sys.stderr, + ) + + new_dict_props = new_dict.get("properties", {}) + the_def_props = the_def.get("properties", {}) + if common_props := new_dict_props.keys() & the_def_props.keys(): + print( + f"WARN: '{def_name}' overrides properties '{common_props}'", + file=sys.stderr, + ) + if merged_props := {**new_dict_props, **the_def_props}: + return {**new_dict, **the_def, "properties": merged_props} + else: + return {**new_dict, **the_def} + + elif isinstance(d, list): + return [replace_ref(v) for v in d] + else: + return d + + return replace_ref(schema) + + +def sep(values: dict) -> str: + """Separator between parts of the description.""" + # If description is multiple paragraphs already, add new ones. Otherwise + # append to same paragraph. + return "\n\n" if "\n\n" in values.get("description", "") else " " + + +def type_str(values: dict) -> str: + """Type of the current value.""" + if t := values.get("io.element.type_name"): + # Allow custom overrides for the type name, for documentation clarity + return f"({t})" + if not (t := values.get("type")): + return "" + if not isinstance(t, list): + t = [t] + joined = "|".join(t) + return f"({joined})" + + +def items(values: dict) -> str: + """A block listing properties of array items.""" + if not (items := values.get("items")): + return "" + if not (item_props := items.get("properties")): + return "" + return "\nOptions for each entry include:\n\n" + "\n".join( + sub_section(k, v) for k, v in item_props.items() + ) + + +def properties(values: dict) -> str: + """A block listing object properties.""" + if not (properties := values.get("properties")): + return "" + return "\nThis setting has the following sub-options:\n\n" + "\n".join( + sub_section(k, v) for k, v in properties.items() + ) + + +def sub_section(prop: str, values: dict) -> str: + """Formats a bullet point about the given sub-property.""" + sep = lambda: globals()["sep"](values) + type_str = lambda: globals()["type_str"](values) + items = lambda: globals()["items"](values) + properties = lambda: globals()["properties"](values) + + def default() -> str: + try: + default = values["default"] + return f"Defaults to `{json.dumps(default)}`." 
+ except KeyError: + return "" + + def description() -> str: + if not (description := values.get("description")): + error(f"missing description for {prop}") + return "MISSING DESCRIPTION\n" + + return f"{description}{p(default(), sep())}\n" + + return ( + f"* `{prop}`{p(type_str())}: " + + f"{indent(description(), first_line=False)}" + + indent(items()) + + indent(properties()) + ) + + +def section(prop: str, values: dict) -> str: + """Formats a section about the given property.""" + sep = lambda: globals()["sep"](values) + type_str = lambda: globals()["type_str"](values) + items = lambda: globals()["items"](values) + properties = lambda: globals()["properties"](values) + + def is_simple_default() -> bool: + """Whether the given default is simple enough for a one-liner.""" + if not (d := values.get("default")): + return True + return not isinstance(d, dict) and not isinstance(d, list) + + def default_str() -> str: + try: + default = values["default"] + except KeyError: + t = values.get("type", []) + if "object" == t or "object" in t: + # Skip objects as they probably have child defaults. + return "" + return "There is no default for this option." + + if not is_simple_default(): + # Show complex defaults as a code block instead. + return "" + return f"Defaults to `{json.dumps(default)}`." + + def header() -> str: + try: + title = SECTION_HEADERS[prop]["title"] + description = SECTION_HEADERS[prop]["description"] + return f"## {title}\n\n{description}\n\n---\n" + except KeyError: + return "" + + def title() -> str: + return f"### `{prop}`\n" + + def description() -> str: + if not (description := values.get("description")): + error(f"missing description for {prop}") + return "MISSING DESCRIPTION\n" + return f"\n{a(em(type_str()))}{description}{p(default_str(), sep())}\n" + + def example_str(example: Any) -> str: + return "```yaml\n" + f"{yaml.dump({prop: example}, sort_keys=False)}" + "```\n" + + def default_example() -> str: + if is_simple_default(): + return "" + default_cfg = example_str(values["default"]) + return f"\nDefault configuration:\n{default_cfg}" + + def examples() -> str: + if not (examples := values.get("examples")): + return "" + + examples_str = "\n".join(example_str(e) for e in examples) + + if len(examples) >= 2: + return f"\nExample configurations:\n{examples_str}" + else: + return f"\nExample configuration:\n{examples_str}" + + def post_description() -> str: + # Sometimes it's helpful to have a description after the list of fields, + # e.g. with a subsection that consists only of text. + # This helps with that. 
+        if not (description := values.get("io.element.post_description")):
+            return ""
+        return f"\n{description}\n\n"
+
+    return (
+        "---\n"
+        + header()
+        + title()
+        + description()
+        + items()
+        + properties()
+        + default_example()
+        + examples()
+        + post_description()
+    )
+
+
+def main() -> None:
+    def usage(err_msg: str) -> int:
+        script_name = (sys.argv[:1] or ["__main__.py"])[0]
+        print(err_msg, file=sys.stderr)
+        print(f"Usage: {script_name} <JSON Schema file>", file=sys.stderr)
+        print(f"\n{__doc__}", file=sys.stderr)
+        exit(1)
+
+    def read_json_file_arg() -> Any:
+        if len(sys.argv) > 2:
+            exit(usage("Too many arguments."))
+        if not (filepath := (sys.argv[1:] or [""])[0]):
+            exit(usage("No schema file provided."))
+        with open(filepath) as f:
+            return yaml.safe_load(f)
+
+    schema = read_json_file_arg()
+    schema = resolve_local_refs(schema)
+
+    sections = (section(k, v) for k, v in schema["properties"].items())
+    print(HEADER + "".join(sections), end="")
+
+    if has_error:
+        print("There were errors.", file=sys.stderr)
+        exit(2)
+
+
+if __name__ == "__main__":
+    main()
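The duration and size conventions documented in `HEADER` above (`5m` is five minutes, `10M` is ten mebibytes) can be modelled as follows. This is a hedged sketch; Synapse's own parser may treat bare numbers or edge cases differently:

```python
DURATION_MS = {"s": 1_000, "m": 60_000, "h": 3_600_000,
               "d": 86_400_000, "w": 604_800_000, "y": 31_536_000_000}
SIZE_BYTES = {"K": 1024, "M": 1024**2, "G": 1024**3, "T": 1024**4}

def parse_duration_ms(value: str) -> int:
    if value[-1] in DURATION_MS:
        return int(value[:-1]) * DURATION_MS[value[-1]]
    return int(value)  # bare numbers: assumed to be milliseconds

def parse_size_bytes(value: str) -> int:
    if value[-1] in SIZE_BYTES:
        return int(value[:-1]) * SIZE_BYTES[value[-1]]
    return int(value)

# Matches the worked examples in HEADER:
assert parse_duration_ms("5m") == 300_000          # five minutes
assert parse_size_bytes("10M") == 10_485_760       # ten mebibytes
```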
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 8acf0a6fb8..7096100a3e 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh
@@ -1,8 +1,9 @@ #!/usr/bin/env bash # # Runs linting scripts over the local Synapse checkout -# black - opinionated code formatter # ruff - lints and finds mistakes +# mypy - typechecks python code +# cargo clippy - lints rust code set -e @@ -101,12 +102,6 @@ echo # Print out the commands being run set -x -# Ensure the sort order of imports. -isort "${files[@]}" - -# Ensure Python code conforms to an opinionated style. -python3 -m black "${files[@]}" - # Ensure the sample configuration file conforms to style checks. ./scripts-dev/config-lint.sh @@ -114,6 +109,9 @@ python3 -m black "${files[@]}" # --quiet suppresses the update check. ruff check --quiet --fix "${files[@]}" +# Reformat Python code. +ruff format --quiet "${files[@]}" + # Catch any common programming mistakes in Rust code. # # --bins, --examples, --lib, --tests combined explicitly disable checking @@ -141,3 +139,6 @@ cargo-fmt # Ensure type hints are correct. mypy + +# Generate configuration documentation from the JSON Schema +./scripts-dev/gen_config_documentation.py schema/synapse-config.schema.yaml > docs/usage/configuration/config_documentation.md diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 877b831751..a15c3c005c 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py
@@ -38,6 +38,7 @@ from mypy.types import (
     NoneType,
     TupleType,
     TypeAliasType,
+    TypeVarType,
     UninhabitedType,
     UnionType,
 )
@@ -233,6 +234,7 @@ IMMUTABLE_CUSTOM_TYPES = {
     "synapse.synapse_rust.push.FilteredPushRules",
     # This is technically not immutable, but close enough.
     "signedjson.types.VerifyKey",
+    "synapse.types.StrCollection",
 }
 
 # Immutable containers only if the values are also immutable.
@@ -298,7 +300,7 @@ def is_cacheable(
 
     elif rt.type.fullname in MUTABLE_CONTAINER_TYPES:
         # Mutable containers are mutable regardless of their underlying type.
-        return False, None
+        return False, f"container {rt.type.fullname} is mutable"
 
     elif "attrs" in rt.type.metadata:
         # attrs classes are only cachable iff it is frozen (immutable itself)
@@ -318,6 +320,9 @@ def is_cacheable(
         else:
             return False, "non-frozen attrs class"
 
+    elif rt.type.is_enum:
+        # We assume Enum values are immutable
+        return True, None
     else:
         # Ensure we fail for unknown types, these generally means that the
         # above code is not complete.
@@ -326,6 +331,18 @@ def is_cacheable(
             f"Don't know how to handle {rt.type.fullname} return type instance",
         )
 
+    elif isinstance(rt, TypeVarType):
+        # We consider TypeVars immutable if they are bound to a set of immutable
+        # types.
+        if rt.values:
+            for value in rt.values:
+                ok, note = is_cacheable(value, signature, verbose)
+                if not ok:
+                    return False, f"TypeVar bound not cacheable {value}"
+            return True, None
+
+        return False, "TypeVar is unbound"
+
     elif isinstance(rt, NoneType):
         # None is cachable.
         return True, None
@@ -343,7 +360,7 @@ def is_cacheable(
         # For a type alias, check if the underlying real type is cachable.
         return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose)
 
-    elif isinstance(rt, UninhabitedType) and rt.is_noreturn:
+    elif isinstance(rt, UninhabitedType):
         # There is no return value, just consider it cachable. This is only used
         # in tests.
         return True, None
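In ordinary `typing` terms, the new `TypeVarType` branch distinguishes value-restricted TypeVars from unbound ones. The example below is illustrative and not taken from Synapse's code base:

```python
from typing import TypeVar

# Value-restricted to immutable types: the plugin walks rt.values, finds
# str and int both cacheable, and accepts the return type.
ImmutableT = TypeVar("ImmutableT", str, int)

# Unbound: nothing constrains what it may be bound to, so the plugin
# reports "TypeVar is unbound" and rejects it.
UnboundT = TypeVar("UnboundT")

def echo_ok(value: ImmutableT) -> ImmutableT:
    # Would be accepted by the plugin's return-type check under @cached.
    return value

def echo_bad(value: UnboundT) -> UnboundT:
    # Would be flagged by the plugin's return-type check under @cached.
    return value
```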
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 5e519bb758..5de5814b17 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py
@@ -20,8 +20,7 @@ # # -"""An interactive script for doing a release. See `cli()` below. -""" +"""An interactive script for doing a release. See `cli()` below.""" import glob import json @@ -41,7 +40,7 @@ import commonmark import git from click.exceptions import ClickException from git import GitCommandError, Repo -from github import Github +from github import BadCredentialsException, Github from packaging import version @@ -255,6 +254,12 @@ def _prepare() -> None: # Update the version specified in pyproject.toml. subprocess.check_output(["poetry", "version", new_version]) + # Update config schema $id. + schema_file = "schema/synapse-config.schema.yaml" + major_minor_version = ".".join(new_version.split(".")[:2]) + url = f"https://element-hq.github.io/synapse/schema/synapse/v{major_minor_version}/synapse-config.schema.json" + subprocess.check_output(["sed", "-i", f"0,/^\\$id: .*/s||$id: {url}|", schema_file]) + # Generate changelogs. generate_and_write_changelog(synapse_repo, current_version, new_version) @@ -324,6 +329,9 @@ def tag(gh_token: Optional[str]) -> None: def _tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) + # Make sure we're in a git repo. repo = get_repo_and_check_clean_checkout() @@ -418,6 +426,11 @@ def publish(gh_token: str) -> None: def _publish(gh_token: str) -> None: """Publish release on GitHub.""" + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + # Make sure we're in a git repo. get_repo_and_check_clean_checkout() @@ -460,6 +473,9 @@ def upload(gh_token: Optional[str]) -> None: def _upload(gh_token: Optional[str]) -> None: """Upload release to pypi.""" + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) + current_version = get_package_version() tag_name = f"v{current_version}" @@ -555,6 +571,9 @@ def wait_for_actions(gh_token: Optional[str]) -> None: def _wait_for_actions(gh_token: Optional[str]) -> None: + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) + # Find out the version and tag name. current_version = get_package_version() tag_name = f"v{current_version}" @@ -579,7 +598,7 @@ def _wait_for_actions(gh_token: Optional[str]) -> None: if all( workflow["status"] != "in_progress" for workflow in resp["workflow_runs"] ): - success = ( + success = all( workflow["status"] == "completed" for workflow in resp["workflow_runs"] ) if success: @@ -711,6 +730,11 @@ Ask the designated people to do the blog and tweets.""" @cli.command() @click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True) def full(gh_token: str) -> None: + if gh_token: + # Test that the GH Token is valid before continuing. + gh = Github(gh_token) + gh.get_user() + click.echo("1. If this is a security release, read the security wiki page.") click.echo("2. Check for any release blockers before proceeding.") click.echo(" https://github.com/element-hq/synapse/labels/X-Release-Blocker") @@ -782,6 +806,22 @@ def get_repo_and_check_clean_checkout( return repo +def check_valid_gh_token(gh_token: Optional[str]) -> None: + """Check that a github token is valid, if supplied""" + + if not gh_token: + # No github token supplied, so nothing to do. + return + + try: + gh = Github(gh_token) + + # We need to lookup name to trigger a request. 
+ _name = gh.get_user().name + except BadCredentialsException as e: + raise click.ClickException(f"GitHub credentials are bad: {e}") + + + def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: """Find the branch/ref, looking first locally then in the remote.""" if ref_name in repo.references: diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
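[Editor's note] The release-script hunk above validates a GitHub token up front by forcing a real API call through PyGithub, since constructing `Github()` alone performs no network request. A minimal standalone sketch of the same fail-fast pattern, assuming only that PyGithub is installed (the `RuntimeError` stands in for the script's `ClickException`):

```python
from typing import Optional

from github import BadCredentialsException, Github


def check_token(gh_token: Optional[str]) -> None:
    """Fail fast if a supplied GitHub token is invalid; do nothing if absent."""
    if not gh_token:
        return
    try:
        # Accessing an attribute forces PyGithub to actually hit the API;
        # the Github() constructor itself is lazy and performs no request.
        _ = Github(gh_token).get_user().name
    except BadCredentialsException as e:
        raise RuntimeError(f"GitHub credentials are bad: {e}") from e
```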
index a141218d3d..c9a4114b1e 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi
@@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Contains *incomplete* type hints for txredisapi. -""" +"""Contains *incomplete* type hints for txredisapi.""" + from typing import Any, List, Optional, Type, Union from twisted.internet import protocol diff --git a/synapse/__init__.py b/synapse/__init__.py
index 99ed7a5374..e7784ac5d7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py
@@ -20,8 +20,7 @@ # # -""" This is an implementation of a Matrix homeserver. -""" +"""This is an implementation of a Matrix homeserver.""" import os import sys @@ -40,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True # Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the # if-statement completely. py_version = sys.version_info -if py_version < (3, 8): - print("Synapse requires Python 3.8 or above.") +if py_version < (3, 9): + print("Synapse requires Python 3.9 or above.") sys.exit(1) # Allow using the asyncio reactor via env var. diff --git a/synapse/_pydantic_compat.py b/synapse/_pydantic_compat.py
index a6ceeb04d2..f0eedf5c6d 100644 --- a/synapse/_pydantic_compat.py +++ b/synapse/_pydantic_compat.py
@@ -19,6 +19,8 @@ # # +from typing import TYPE_CHECKING + from packaging.version import Version try: @@ -30,4 +32,64 @@ except ImportError: HAS_PYDANTIC_V2: bool = Version(pydantic_version).major == 2 -__all__ = ("HAS_PYDANTIC_V2",) +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import ( + BaseModel, + Extra, + Field, + MissingError, + PydanticValueError, + StrictBool, + StrictInt, + StrictStr, + ValidationError, + conbytes, + confloat, + conint, + constr, + parse_obj_as, + validator, + ) + from pydantic.v1.error_wrappers import ErrorWrapper + from pydantic.v1.typing import get_args +else: + from pydantic import ( + BaseModel, + Extra, + Field, + MissingError, + PydanticValueError, + StrictBool, + StrictInt, + StrictStr, + ValidationError, + conbytes, + confloat, + conint, + constr, + parse_obj_as, + validator, + ) + from pydantic.error_wrappers import ErrorWrapper + from pydantic.typing import get_args + +__all__ = ( + "HAS_PYDANTIC_V2", + "BaseModel", + "constr", + "conbytes", + "conint", + "confloat", + "ErrorWrapper", + "Extra", + "Field", + "get_args", + "MissingError", + "parse_obj_as", + "PydanticValueError", + "StrictBool", + "StrictInt", + "StrictStr", + "ValidationError", + "validator", +) diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py
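[Editor's note] The `_pydantic_compat` hunk above turns the module into a single import point that re-exports the v1 API from whichever Pydantic major version is installed. A condensed sketch of that shim pattern, assuming either Pydantic v1 or v2 is present (the export list is trimmed for brevity):

```python
from pydantic import VERSION as pydantic_version

HAS_PYDANTIC_V2: bool = int(pydantic_version.split(".")[0]) == 2

if HAS_PYDANTIC_V2:
    # Pydantic v2 ships the entire v1 API under the `pydantic.v1` namespace.
    from pydantic.v1 import BaseModel, StrictStr, validator
else:
    from pydantic import BaseModel, StrictStr, validator

__all__ = ("HAS_PYDANTIC_V2", "BaseModel", "StrictStr", "validator")
```

Callers then import everything from the shim module, so the rest of the codebase never needs to know which major version is installed.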
index 715c7ddc17..09feb8cf30 100755 --- a/synapse/_scripts/generate_workers_map.py +++ b/synapse/_scripts/generate_workers_map.py
@@ -171,7 +171,7 @@ def elide_http_methods_if_unconflicting( """ def paths_to_methods_dict( - methods_and_paths: Iterable[Tuple[str, str]] + methods_and_paths: Iterable[Tuple[str, str]], ) -> Dict[str, Set[str]]: """ Given (method, path) pairs, produces a dict from path to set of methods @@ -201,7 +201,7 @@ def elide_http_methods_if_unconflicting( def simplify_path_regexes( - registrations: Dict[Tuple[str, str], EndpointDescription] + registrations: Dict[Tuple[str, str], EndpointDescription], ) -> Dict[Tuple[str, str], EndpointDescription]: """ Simplify all the path regexes for the dict of endpoint descriptions, diff --git a/synapse/_scripts/hash_password.py b/synapse/_scripts/hash_password.py
index 3bed367be2..2b7d3585cb 100755 --- a/synapse/_scripts/hash_password.py +++ b/synapse/_scripts/hash_password.py
@@ -56,7 +56,9 @@ def main() -> None: password_pepper = password_config.get("pepper", password_pepper) password = args.password - if not password: + if not password and not sys.stdin.isatty(): + password = sys.stdin.readline().strip() + elif not password: password = prompt_for_pass() # On Python 2, make sure we decode it to Unicode before we normalise it diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py
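[Editor's note] The `hash_password` change above lets the password arrive on a pipe (e.g. from a secrets manager) as well as interactively. A small sketch of that TTY-aware input pattern using only the standard library (names are illustrative):

```python
import getpass
import sys


def read_password(cli_value: str = "") -> str:
    if cli_value:
        # Explicit command-line value wins.
        return cli_value
    if not sys.stdin.isatty():
        # Input is piped (e.g. `echo secret | ./script`), so read one line.
        return sys.stdin.readline().strip()
    # Interactive terminal: prompt without echoing.
    return getpass.getpass("Password: ")
```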
index ad88df477a..03bd58a1a1 100644 --- a/synapse/_scripts/review_recent_signups.py +++ b/synapse/_scripts/review_recent_signups.py
@@ -40,6 +40,7 @@ from synapse.storage.engines import create_engine class ReviewConfig(RootConfig): "A config class that just pulls out the database config" + config_classes = [DatabaseConfig] @@ -73,13 +74,6 @@ def get_recent_users( user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn] for user_info in user_infos: - user_info.emails = DatabasePool.simple_select_onecol_txn( - txn, - table="user_threepids", - keyvalues={"user_id": user_info.user_id, "medium": "email"}, - retcol="address", - ) - sql = """ SELECT room_id, canonical_alias, name, join_rules FROM local_current_membership @@ -160,7 +154,11 @@ def main() -> None: with make_conn(database_config, engine, "review_recent_signups") as db_conn: # This generates a type of Cursor, not LoggingTransaction. - user_infos = get_recent_users(db_conn.cursor(), since_ms, exclude_users_with_appservice) # type: ignore[arg-type] + user_infos = get_recent_users( + db_conn.cursor(), + since_ms, # type: ignore[arg-type] + exclude_users_with_appservice, + ) for user_info in user_infos: if exclude_users_with_email and user_info.emails: diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 5c6db8118f..573c70696e 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py
@@ -42,12 +42,12 @@ from typing import ( Set, Tuple, Type, + TypedDict, TypeVar, cast, ) import yaml -from typing_extensions import TypedDict from twisted.internet import defer, reactor as reactor_ @@ -88,6 +88,7 @@ from synapse.storage.databases.main.relations import RelationsWorkerStore from synapse.storage.databases.main.room import RoomBackgroundUpdateStore from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore from synapse.storage.databases.main.search import SearchBackgroundUpdateStore +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.user_directory import ( @@ -127,8 +128,14 @@ BOOLEAN_COLUMNS = { "pushers": ["enabled"], "redactions": ["have_censored"], "remote_media_cache": ["authenticated"], + "room_memberships": ["participant"], "room_stats_state": ["is_federatable"], "rooms": ["is_public", "has_auth_chain_index"], + "sliding_sync_joined_rooms": ["is_encrypted"], + "sliding_sync_membership_snapshots": [ + "has_known_state", + "is_encrypted", + ], "users": ["shadow_banned", "approved", "locked", "suspended"], "un_partial_stated_event_stream": ["rejection_status_changed"], "users_who_share_rooms": ["share_private"], @@ -185,6 +192,11 @@ APPEND_ONLY_TABLES = [ IGNORED_TABLES = { + # Porting the auto generated sequence in this table is non-trivial. + # None of the entries in this list are mandatory for Synapse to keep working. + # If state group disk space is an issue after the port, the + # `mark_unreferenced_state_groups_for_deletion_bg_update` background task can be run again. + "state_groups_pending_deletion", # We don't port these tables, as they're a faff and we can regenerate # them anyway. "user_directory", @@ -210,6 +222,15 @@ IGNORED_TABLES = { } +# These background updates will not be applied upon creation of the postgres database. +IGNORED_BACKGROUND_UPDATES = { + # Reapplying this background update to the postgres database is unnecessary after + # already having waited for the SQLite database to complete all running background + # updates. + "mark_unreferenced_state_groups_for_deletion_bg_update", +} + + # Error returned by the run function. Used at the top-level part of the script to # handle errors and return codes. end_error: Optional[str] = None @@ -250,6 +271,7 @@ class Store( ReceiptsBackgroundUpdateStore, RelationsWorkerStore, EventFederationWorkerStore, + SlidingSyncStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) @@ -680,6 +702,20 @@ class Porter: # 0 means off. 1 means full. 2 means incremental. return autovacuum_setting != 0 + async def remove_ignored_background_updates_from_database(self) -> None: + def _remove_delete_unreferenced_state_groups_bg_updates( + txn: LoggingTransaction, + ) -> None: + txn.execute( + "DELETE FROM background_updates WHERE update_name = ANY(?)", + (list(IGNORED_BACKGROUND_UPDATES),), + ) + + await self.postgres_store.db_pool.runInteraction( + "remove_delete_unreferenced_state_groups_bg_updates", + _remove_delete_unreferenced_state_groups_bg_updates, + ) + async def run(self) -> None: """Ports the SQLite database to a PostgreSQL database. @@ -712,9 +748,7 @@ class Porter: return # Check if all background updates are done, abort if not. 
- updates_complete = ( - await self.sqlite_store.db_pool.updates.has_completed_background_updates() - ) + updates_complete = await self.sqlite_store.db_pool.updates.has_completed_background_updates() if not updates_complete: end_error = ( "Pending background updates exist in the SQLite3 database." @@ -727,6 +761,8 @@ class Porter: self.hs_config.database.get_single_database() ) + await self.remove_ignored_background_updates_from_database() + await self.run_background_updates_on_postgres() self.progress.set_state("Creating port tables") @@ -1029,7 +1065,7 @@ class Porter: def get_sent_table_size(txn: LoggingTransaction) -> int: txn.execute( - "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,) + "SELECT count(*) FROM sent_transactions WHERE ts >= ?", (yesterday,) ) result = txn.fetchone() assert result is not None @@ -1090,10 +1126,10 @@ class Porter: return done, remaining + done async def _setup_state_group_id_seq(self) -> None: - curr_id: Optional[int] = ( - await self.sqlite_store.db_pool.simple_select_one_onecol( - table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True - ) + curr_id: Optional[ + int + ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="state_groups", keyvalues={}, retcol="MAX(id)", allow_none=True ) if not curr_id: @@ -1181,13 +1217,13 @@ class Porter: ) async def _setup_auth_chain_sequence(self) -> None: - curr_chain_id: Optional[int] = ( - await self.sqlite_store.db_pool.simple_select_one_onecol( - table="event_auth_chains", - keyvalues={}, - retcol="MAX(chain_id)", - allow_none=True, - ) + curr_chain_id: Optional[ + int + ] = await self.sqlite_store.db_pool.simple_select_one_onecol( + table="event_auth_chains", + keyvalues={}, + retcol="MAX(chain_id)", + allow_none=True, ) def r(txn: LoggingTransaction) -> None: diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py
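[Editor's note] `remove_ignored_background_updates_from_database` above deletes the whole `IGNORED_BACKGROUND_UPDATES` set in one statement by binding a Python list to Postgres's `ANY(...)`. A sketch of the same idea with plain psycopg2 rather than Synapse's database wrapper (the DSN is a placeholder):

```python
import psycopg2

IGNORED_BACKGROUND_UPDATES = {
    "mark_unreferenced_state_groups_for_deletion_bg_update",
}

with psycopg2.connect("dbname=synapse") as conn:
    with conn.cursor() as cur:
        # psycopg2 adapts a Python list to a Postgres ARRAY, so `= ANY(%s)`
        # matches every name in the set with a single round trip.
        cur.execute(
            "DELETE FROM background_updates WHERE update_name = ANY(%s)",
            (list(IGNORED_BACKGROUND_UPDATES),),
        )
```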
index 688df9485c..2e2aa27a17 100755 --- a/synapse/_scripts/synctl.py +++ b/synapse/_scripts/synctl.py
@@ -292,9 +292,9 @@ def main() -> None: for key in worker_config: if key == "worker_app": # But we allow worker_app continue - assert not key.startswith( - "worker_" - ), "Main process cannot use worker_* config" + assert not key.startswith("worker_"), ( + "Main process cannot use worker_* config" + ) else: worker_pidfile = worker_config["worker_pid_file"] worker_cache_factor = worker_config.get("synctl_cache_factor") diff --git a/synapse/api/auth/__init__.py b/synapse/api/auth/__init__.py
index d5241afe73..1b801d3ad3 100644 --- a/synapse/api/auth/__init__.py +++ b/synapse/api/auth/__init__.py
@@ -18,9 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional, Tuple - -from typing_extensions import Protocol +from typing import TYPE_CHECKING, Optional, Protocol, Tuple from twisted.web.server import Request diff --git a/synapse/api/auth/msc3861_delegated.py b/synapse/api/auth/msc3861_delegated.py
index 7361666c77..e500a06afe 100644 --- a/synapse/api/auth/msc3861_delegated.py +++ b/synapse/api/auth/msc3861_delegated.py
@@ -19,7 +19,8 @@ # # import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional from urllib.parse import urlencode from authlib.oauth2 import ClientAuth @@ -38,15 +39,16 @@ from synapse.api.errors import ( HttpResponseException, InvalidClientTokenError, OAuthInsufficientScopeError, - StoreError, SynapseError, UnrecognizedRequestError, ) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable +from synapse.logging.opentracing import active_span, force_tracing, start_active_span from synapse.types import Requester, UserID, create_requester from synapse.util import json_decoder from synapse.util.caches.cached_call import RetryOnExceptionCachedCall +from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext if TYPE_CHECKING: from synapse.rest.admin.experimental_features import ExperimentalFeature @@ -76,6 +78,61 @@ def scope_to_list(scope: str) -> List[str]: return scope.strip().split(" ") +@dataclass +class IntrospectionResult: + _inner: IntrospectionToken + + # when we retrieved this token, + # in milliseconds since the Unix epoch + retrieved_at_ms: int + + def is_active(self, now_ms: int) -> bool: + if not self._inner.get("active"): + return False + + expires_in = self._inner.get("expires_in") + if expires_in is None: + return True + if not isinstance(expires_in, int): + raise InvalidClientTokenError("token `expires_in` is not an int") + + absolute_expiry_ms = expires_in * 1000 + self.retrieved_at_ms + return now_ms < absolute_expiry_ms + + def get_scope_list(self) -> List[str]: + value = self._inner.get("scope") + if not isinstance(value, str): + return [] + return scope_to_list(value) + + def get_sub(self) -> Optional[str]: + value = self._inner.get("sub") + if not isinstance(value, str): + return None + return value + + def get_username(self) -> Optional[str]: + value = self._inner.get("username") + if not isinstance(value, str): + return None + return value + + def get_name(self) -> Optional[str]: + value = self._inner.get("name") + if not isinstance(value, str): + return None + return value + + def get_device_id(self) -> Optional[str]: + value = self._inner.get("device_id") + if value is not None and not isinstance(value, str): + raise AuthError( + 500, + "Invalid device ID in introspection result", + ) + return value + + class PrivateKeyJWTWithKid(PrivateKeyJWT): # type: ignore[misc] """An implementation of the private_key_jwt client auth method that includes a kid header. @@ -119,9 +176,39 @@ class MSC3861DelegatedAuth(BaseAuth): self._clock = hs.get_clock() self._http_client = hs.get_proxied_http_client() self._hostname = hs.hostname - self._admin_token = self._config.admin_token + self._admin_token: Callable[[], Optional[str]] = self._config.admin_token + self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users + + # # Token Introspection Cache + # This remembers what users/devices are represented by which access tokens, + # in order to reduce overall system load: + # - on Synapse (as requests are relatively expensive) + # - on the network + # - on MAS + # + # Since there is no invalidation mechanism currently, + # the entries expire after 2 minutes. + # This does mean tokens can be treated as valid by Synapse + # for longer than reality. + # + # Ideally, tokens should logically be invalidated in the following circumstances: + # - If a session logout happens. 
+ # In this case, MAS will delete the device within Synapse + # anyway and this is good enough as an invalidation. + # - If the client refreshes their token in MAS. + # In this case, the device still exists and it's not the end of the world for + # the old access token to continue working for a short time. + self._introspection_cache: ResponseCache[str] = ResponseCache( + self._clock, + "token_introspection", + timeout_ms=120_000, + # don't log because the keys are access tokens + enable_logging=False, + ) - self._issuer_metadata = RetryOnExceptionCachedCall(self._load_metadata) + self._issuer_metadata = RetryOnExceptionCachedCall[OpenIDProviderMetadata]( + self._load_metadata + ) if isinstance(auth_method, PrivateKeyJWTWithKid): # Use the JWK as the client secret when using the private_key_jwt method @@ -131,9 +218,10 @@ class MSC3861DelegatedAuth(BaseAuth): ) else: # Else use the client secret - assert self._config.client_secret, "No client_secret provided" + client_secret = self._config.client_secret() + assert client_secret, "No client_secret provided" self._client_auth = ClientAuth( - self._config.client_id, self._config.client_secret, auth_method + self._config.client_id, client_secret, auth_method ) async def _load_metadata(self) -> OpenIDProviderMetadata: @@ -145,6 +233,39 @@ class MSC3861DelegatedAuth(BaseAuth): # metadata.validate_introspection_endpoint() return metadata + async def issuer(self) -> str: + """ + Get the configured issuer + + This will use the issuer value set in the metadata, + falling back to the one set in the config if not set in the metadata + """ + metadata = await self._issuer_metadata.get() + return metadata.issuer or self._config.issuer + + async def account_management_url(self) -> Optional[str]: + """ + Get the configured account management URL + + This will discover the account management URL from the issuer if it's not set in the config + """ + if self._config.account_management_url is not None: + return self._config.account_management_url + + try: + metadata = await self._issuer_metadata.get() + return metadata.get("account_management_uri", None) + # We don't want to raise here if we can't load the metadata + except Exception: + logger.warning("Failed to load metadata:", exc_info=True) + return None + + async def auth_metadata(self) -> Dict[str, Any]: + """ + Returns the auth metadata dict + """ + return await self._issuer_metadata.get() + async def _introspection_endpoint(self) -> str: """ Returns the introspection endpoint of the issuer @@ -154,10 +275,12 @@ class MSC3861DelegatedAuth(BaseAuth): if self._config.introspection_endpoint is not None: return self._config.introspection_endpoint - metadata = await self._load_metadata() + metadata = await self._issuer_metadata.get() return metadata.get("introspection_endpoint") - async def _introspect_token(self, token: str) -> IntrospectionToken: + async def _introspect_token( + self, token: str, cache_context: ResponseCacheContext[str] + ) -> IntrospectionResult: """ Send a token to the introspection endpoint and returns the introspection response @@ -173,11 +296,16 @@ class MSC3861DelegatedAuth(BaseAuth): Returns: The introspection response """ + # By default, we shouldn't cache the result unless we know it's valid + cache_context.should_cache = False introspection_endpoint = await self._introspection_endpoint() raw_headers: Dict[str, str] = { "Content-Type": "application/x-www-form-urlencoded", "User-Agent": str(self._http_client.user_agent, "utf-8"), "Accept": "application/json", + # Tell MAS that we 
support reading the device ID as an explicit + # value, not encoded in the scope. This is supported by MAS 0.15+ + "X-MAS-Supports-Device-Id": "1", } args = {"token": token, "token_type_hint": "access_token"} @@ -227,7 +355,11 @@ class MSC3861DelegatedAuth(BaseAuth): "The introspection endpoint returned an invalid JSON response." ) - return IntrospectionToken(**resp) + # We had a valid response, so we can cache it + cache_context.should_cache = True + return IntrospectionResult( + IntrospectionToken(**resp), retrieved_at_ms=self._clock.time_msec() + ) async def is_server_admin(self, requester: Requester) -> bool: return "urn:synapse:admin:*" in requester.scope @@ -239,6 +371,55 @@ class MSC3861DelegatedAuth(BaseAuth): allow_expired: bool = False, allow_locked: bool = False, ) -> Requester: + """Get a registered user's ID. + + Args: + request: An HTTP request with an access_token query parameter. + allow_guest: If False, will raise an AuthError if the user making the + request is a guest. + allow_expired: If True, allow the request through even if the account + is expired, or session token lifetime has ended. Note that + /login will deliver access tokens regardless of expiration. + + Returns: + Resolves to the requester + Raises: + InvalidClientCredentialsError if no user by that token exists or the token + is invalid. + AuthError if access is denied for the user in the access token + """ + parent_span = active_span() + with start_active_span("get_user_by_req"): + requester = await self._wrapped_get_user_by_req( + request, allow_guest, allow_expired, allow_locked + ) + + if parent_span: + if requester.authenticated_entity in self._force_tracing_for_users: + # request tracing is enabled for this user, so we need to force it + # tracing on for the parent span (which will be the servlet span). + # + # It's too late for the get_user_by_req span to inherit the setting, + # so we also force it on for that. + force_tracing() + force_tracing(parent_span) + parent_span.set_tag( + "authenticated_entity", requester.authenticated_entity + ) + parent_span.set_tag("user_id", requester.user.to_string()) + if requester.device_id is not None: + parent_span.set_tag("device_id", requester.device_id) + if requester.app_service is not None: + parent_span.set_tag("appservice_id", requester.app_service.id) + return requester + + async def _wrapped_get_user_by_req( + self, + request: SynapseRequest, + allow_guest: bool = False, + allow_expired: bool = False, + allow_locked: bool = False, + ) -> Requester: access_token = self.get_access_token_from_request(request) requester = await self.get_appservice_user(request, access_token) @@ -248,7 +429,7 @@ class MSC3861DelegatedAuth(BaseAuth): requester = await self.get_user_by_access_token(access_token, allow_expired) # Do not record requests from MAS using the virtual `__oidc_admin` user. - if access_token != self._admin_token: + if access_token != self._admin_token(): await self._record_request(request, requester) if not allow_guest and requester.is_guest: @@ -289,7 +470,8 @@ class MSC3861DelegatedAuth(BaseAuth): token: str, allow_expired: bool = False, ) -> Requester: - if self._admin_token is not None and token == self._admin_token: + admin_token = self._admin_token() + if admin_token is not None and token == admin_token: # XXX: This is a temporary solution so that the admin API can be called by # the OIDC provider. This will be removed once we have OIDC client # credentials grant support in matrix-authentication-service. 
@@ -304,20 +486,22 @@ class MSC3861DelegatedAuth(BaseAuth): ) try: - introspection_result = await self._introspect_token(token) + introspection_result = await self._introspection_cache.wrap( + token, self._introspect_token, token, cache_context=True + ) except Exception: logger.exception("Failed to introspect token") raise SynapseError(503, "Unable to introspect the access token") - logger.info(f"Introspection result: {introspection_result!r}") + logger.debug("Introspection result: %r", introspection_result) # TODO: introspection verification should be more extensive, especially: # - verify the audience - if not introspection_result.get("active"): + if not introspection_result.is_active(self._clock.time_msec()): raise InvalidClientTokenError("Token is not active") # Let's look at the scope - scope: List[str] = scope_to_list(introspection_result.get("scope", "")) + scope: List[str] = introspection_result.get_scope_list() # Determine type of user based on presence of particular scopes has_user_scope = SCOPE_MATRIX_API in scope @@ -327,7 +511,7 @@ class MSC3861DelegatedAuth(BaseAuth): raise InvalidClientTokenError("No scope in token granting user rights") # Match via the sub claim - sub: Optional[str] = introspection_result.get("sub") + sub = introspection_result.get_sub() if sub is None: raise InvalidClientTokenError( "Invalid sub claim in the introspection result" @@ -340,29 +524,20 @@ class MSC3861DelegatedAuth(BaseAuth): # If we could not find a user via the external_id, it either does not exist, # or the external_id was never recorded - # TODO: claim mapping should be configurable - username: Optional[str] = introspection_result.get("username") - if username is None or not isinstance(username, str): + username = introspection_result.get_username() + if username is None: raise AuthError( 500, "Invalid username claim in the introspection result", ) user_id = UserID(username, self._hostname) - # First try to find a user from the username claim + # Try to find a user from the username claim user_info = await self.store.get_user_by_id(user_id=user_id.to_string()) if user_info is None: - # If the user does not exist, we should create it on the fly - # TODO: we could use SCIM to provision users ahead of time and listen - # for SCIM SET events if those ever become standard: - # https://datatracker.ietf.org/doc/html/draft-hunt-scim-notify-00 - - # TODO: claim mapping should be configurable - # If present, use the name claim as the displayname - name: Optional[str] = introspection_result.get("name") - - await self.store.register_user( - user_id=user_id.to_string(), create_profile_with_displayname=name + raise AuthError( + 500, + "User not found", ) # And record the sub as external_id @@ -372,42 +547,40 @@ class MSC3861DelegatedAuth(BaseAuth): else: user_id = UserID.from_string(user_id_str) - # Find device_ids in scope - # We only allow a single device_id in the scope, so we find them all in the - # scope list, and raise if there are more than one. The OIDC server should be - # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope. 
- device_ids = [ - tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] - for tok in scope - if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX) - ] - - if len(device_ids) > 1: - raise AuthError( - 500, - "Multiple device IDs in scope", - ) + # MAS 0.15+ will give us the device ID as an explicit value for compatibility sessions + # If present, we get it from here, if not we get it in the scope + device_id = introspection_result.get_device_id() + if device_id is None: + # Find device_ids in scope + # We only allow a single device_id in the scope, so we find them all in the + # scope list, and raise if there are more than one. The OIDC server should be + # the one enforcing valid scopes, so we raise a 500 if we find an invalid scope. + device_ids = [ + tok[len(SCOPE_MATRIX_DEVICE_PREFIX) :] + for tok in scope + if tok.startswith(SCOPE_MATRIX_DEVICE_PREFIX) + ] + + if len(device_ids) > 1: + raise AuthError( + 500, + "Multiple device IDs in scope", + ) + + device_id = device_ids[0] if device_ids else None - device_id = device_ids[0] if device_ids else None if device_id is not None: # Sanity check the device_id if len(device_id) > 255 or len(device_id) < 1: raise AuthError( 500, - "Invalid device ID in scope", + "Invalid device ID in introspection result", ) - # Create the device on the fly if it does not exist - try: - await self.store.get_device( - user_id=user_id.to_string(), device_id=device_id - ) - except StoreError: - await self.store.store_device( - user_id=user_id.to_string(), - device_id=device_id, - initial_device_display_name="OIDC-native client", - ) + # Make sure the device exists + await self.store.get_device( + user_id=user_id.to_string(), device_id=device_id ) # TODO: there are a few things missing in the requester here, which still need # to be figured out, like: diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py
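[Editor's note] `IntrospectionResult.is_active` above turns the relative `expires_in` hint into an absolute deadline anchored at the time the token was introspected, which is what makes the two-minute response cache safe. A self-contained sketch of that arithmetic, with a plain dict standing in for authlib's `IntrospectionToken`:

```python
import time
from dataclasses import dataclass
from typing import Any, Dict


@dataclass
class CachedIntrospection:
    payload: Dict[str, Any]
    # when the introspection response was received, ms since the Unix epoch
    retrieved_at_ms: int

    def is_active(self, now_ms: int) -> bool:
        if not self.payload.get("active"):
            return False
        expires_in = self.payload.get("expires_in")
        if expires_in is None:
            # No expiry hint: trust the `active` flag alone.
            return True
        return now_ms < self.retrieved_at_ms + expires_in * 1000


now_ms = int(time.time() * 1000)
result = CachedIntrospection({"active": True, "expires_in": 120}, retrieved_at_ms=now_ms)
assert result.is_active(now_ms + 60_000)       # still valid after 1 minute
assert not result.is_active(now_ms + 180_000)  # expired after 3 minutes
```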
index 303c9ba03e..a56ffd58e4 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py
@@ -24,7 +24,6 @@ from typing import TYPE_CHECKING, Optional from synapse.api.constants import LimitBlockingTypes, UserTypes from synapse.api.errors import Codes, ResourceLimitError -from synapse.config.server import is_threepid_reserved from synapse.types import Requester if TYPE_CHECKING: @@ -43,16 +42,13 @@ class AuthBlocking: self._admin_contact = hs.config.server.admin_contact self._max_mau_value = hs.config.server.max_mau_value self._limit_usage_by_mau = hs.config.server.limit_usage_by_mau - self._mau_limits_reserved_threepids = ( - hs.config.server.mau_limits_reserved_threepids - ) self._is_mine_server_name = hs.is_mine_server_name self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips async def check_auth_blocking( self, user_id: Optional[str] = None, - threepid: Optional[dict] = None, + threepid: Optional[str] = None, # Not used in this method, but kept for compatibility user_type: Optional[str] = None, requester: Optional[Requester] = None, ) -> None: @@ -63,12 +59,6 @@ class AuthBlocking: user_id: If present, checks for presence against existing MAU cohort - threepid: If present, checks for presence against configured - reserved threepid. Used in cases where the user is trying register - with a MAU blocked server, normally they would be rejected but their - threepid is on the reserved list. user_id and - threepid should never be set at the same time. - user_type: If present, is used to decide whether to check against certain blocking reasons like MAU. @@ -111,9 +101,8 @@ class AuthBlocking: admin_contact=self._admin_contact, limit_type=LimitBlockingTypes.HS_DISABLED, ) - if self._limit_usage_by_mau is True: - assert not (user_id and threepid) + if self._limit_usage_by_mau is True: # If the user is already part of the MAU cohort or a trial user if user_id: timestamp = await self.store.user_last_seen_monthly_active(user_id) @@ -123,11 +112,6 @@ class AuthBlocking: is_trial = await self.store.is_trial_user(user_id) if is_trial: return - elif threepid: - # If the user does not exist yet, but is signing up with a - # reserved threepid then pass auth check - if is_threepid_reserved(self._mau_limits_reserved_threepids, threepid): - return elif user_type == UserTypes.SUPPORT: # If the user does not exist yet and is of type "support", # allow registration. Support users are excluded from MAU checks. diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 7dcb1e01fd..cd2ebf2cc3 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py
@@ -29,8 +29,13 @@ from typing import Final # the max size of a (canonical-json-encoded) event MAX_PDU_SIZE = 65536 -# the "depth" field on events is limited to 2**63 - 1 -MAX_DEPTH = 2**63 - 1 +# Max/min size of ints in canonical JSON +CANONICALJSON_MAX_INT = (2**53) - 1 +CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT + +# the "depth" field on events is limited to the same as what +# canonicaljson accepts +MAX_DEPTH = CANONICALJSON_MAX_INT # the maximum length for a room alias is 255 characters MAX_ALIAS_LENGTH = 255 @@ -81,8 +86,6 @@ class RestrictedJoinRuleTypes: class LoginType: PASSWORD: Final = "m.login.password" - EMAIL_IDENTITY: Final = "m.login.email.identity" - MSISDN: Final = "m.login.msisdn" RECAPTCHA: Final = "m.login.recaptcha" TERMS: Final = "m.login.terms" SSO: Final = "m.login.sso" @@ -180,12 +183,18 @@ ServerNoticeLimitReached: Final = "m.server_notice.usage_limit_reached" class UserTypes: """Allows for user type specific behaviour. With the benefit of hindsight - 'admin' and 'guest' users should also be UserTypes. Normal users are type None + 'admin' and 'guest' users should also be UserTypes. Extra user types can be + added in the configuration. Normal users are type None or one of the extra + user types (if configured). """ SUPPORT: Final = "support" BOT: Final = "bot" - ALL_USER_TYPES: Final = (SUPPORT, BOT) + ALL_BUILTIN_USER_TYPES: Final = (SUPPORT, BOT) + """ + The user types that are built-in to Synapse. Extra user types can be + added in the configuration. + """ class RelationTypes: @@ -230,6 +239,10 @@ class EventContentFields: ROOM_NAME: Final = "name" + MEMBERSHIP: Final = "membership" + MEMBERSHIP_DISPLAYNAME: Final = "displayname" + MEMBERSHIP_AVATAR_URL: Final = "avatar_url" + # Used in m.room.guest_access events. GUEST_ACCESS: Final = "guest_access" @@ -245,6 +258,8 @@ class EventContentFields: # `m.room.encryption`` algorithm field ENCRYPTION_ALGORITHM: Final = "algorithm" + TOMBSTONE_SUCCESSOR_ROOM: Final = "replacement_room" + class EventUnsignedContentFields: """Fields found inside the 'unsigned' data on events""" @@ -269,6 +284,10 @@ class AccountDataTypes: IGNORED_USER_LIST: Final = "m.ignored_user_list" TAG: Final = "m.tag" PUSH_RULES: Final = "m.push_rules" + # MSC4155: Invite filtering + MSC4155_INVITE_PERMISSION_CONFIG: Final = ( + "org.matrix.msc4155.invite_permission_config" + ) class HistoryVisibility: @@ -314,3 +333,8 @@ class ApprovalNoticeMedium: class Direction(enum.Enum): BACKWARDS = "b" FORWARDS = "f" + + +class ProfileFields: + DISPLAYNAME: Final = "displayname" + AVATAR_URL: Final = "avatar_url" diff --git a/synapse/api/errors.py b/synapse/api/errors.py
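[Editor's note] The constants hunk above narrows `MAX_DEPTH` from `2**63 - 1` to the canonical-JSON integer range, i.e. the largest integers an IEEE-754 double represents exactly. A short sketch of the range check those constants imply (the helper name is illustrative):

```python
CANONICALJSON_MAX_INT = (2**53) - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT


def is_canonicaljson_int(value: int) -> bool:
    """Canonical JSON only guarantees integers that fit in an IEEE-754 double."""
    return CANONICALJSON_MIN_INT <= value <= CANONICALJSON_MAX_INT


assert is_canonicaljson_int(2**53 - 1)
assert not is_canonicaljson_int(2**63 - 1)  # the old MAX_DEPTH is out of range
```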
index e6efa7a424..a095fb195b 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py
@@ -65,11 +65,8 @@ class Codes(str, Enum): INVALID_PARAM = "M_INVALID_PARAM" TOO_LARGE = "M_TOO_LARGE" EXCLUSIVE = "M_EXCLUSIVE" - THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED" - THREEPID_IN_USE = "M_THREEPID_IN_USE" - THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND" - THREEPID_DENIED = "M_THREEPID_DENIED" INVALID_USERNAME = "M_INVALID_USERNAME" + THREEPID_MEDIUM_NOT_SUPPORTED = "M_THREEPID_MEDIUM_NOT_SUPPORTED" # Kept around for throwing when 3PID is attempted SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED" CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN" CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM" @@ -87,8 +84,7 @@ class Codes(str, Enum): WEAK_PASSWORD = "M_WEAK_PASSWORD" INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" - # USER_LOCKED = "M_USER_LOCKED" - USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED" + USER_LOCKED = "M_USER_LOCKED" NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED" CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA" @@ -101,8 +97,9 @@ class Codes(str, Enum): # The account has been suspended on the server. # By opposition to `USER_DEACTIVATED`, this is a reversible measure # that can possibly be appealed and reverted. - # Part of MSC3823. - USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" + # Introduced by MSC3823 + # https://github.com/matrix-org/matrix-spec-proposals/pull/3823 + USER_ACCOUNT_SUSPENDED = "M_USER_SUSPENDED" BAD_ALIAS = "M_BAD_ALIAS" # For restricted join rules. @@ -132,6 +129,13 @@ class Codes(str, Enum): # connection. UNKNOWN_POS = "M_UNKNOWN_POS" + # Part of MSC4133 + PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE" + KEY_TOO_LARGE = "M_KEY_TOO_LARGE" + + # Part of MSC4155 + INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED" + class CodeMessageException(RuntimeError): """An exception with integer code, a message string attributes and optional headers. @@ -573,13 +577,6 @@ class UnsupportedRoomVersionError(SynapseError): ) -class ThreepidValidationError(SynapseError): - """An error raised when there was a problem authorising an event.""" - - def __init__(self, msg: str, errcode: str = Codes.FORBIDDEN): - super().__init__(400, msg, errcode) - - class IncompatibleRoomVersionError(SynapseError): """A server is trying to join a room whose version it does not support. diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
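[Editor's note] `Codes` above mixes `str` into `Enum`, so members compare equal to their wire-format strings while remaining a closed set; that is what lets the renamed `USER_LOCKED` value flow straight into JSON error bodies. A minimal sketch of the pattern:

```python
from enum import Enum


class Codes(str, Enum):
    USER_LOCKED = "M_USER_LOCKED"
    INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED"


# Members compare equal to the raw wire string, so response bodies can be
# built or checked against them directly.
assert Codes.USER_LOCKED == "M_USER_LOCKED"
assert Codes.USER_LOCKED.value == "M_USER_LOCKED"
```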
index b80630c5d3..4f3bf8f770 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py
@@ -20,8 +20,7 @@ # # -from collections import OrderedDict -from typing import Hashable, Optional, Tuple +from typing import TYPE_CHECKING, Dict, Hashable, Optional, Tuple from synapse.api.errors import LimitExceededError from synapse.config.ratelimiting import RatelimitSettings @@ -29,6 +28,12 @@ from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util import Clock +if TYPE_CHECKING: + # To avoid circular imports: + from synapse.module_api.callbacks.ratelimit_callbacks import ( + RatelimitModuleApiCallbacks, + ) + class Ratelimiter: """ @@ -73,19 +78,23 @@ class Ratelimiter: store: DataStore, clock: Clock, cfg: RatelimitSettings, + ratelimit_callbacks: Optional["RatelimitModuleApiCallbacks"] = None, ): self.clock = clock self.rate_hz = cfg.per_second self.burst_count = cfg.burst_count self.store = store self._limiter_name = cfg.key + self._ratelimit_callbacks = ratelimit_callbacks - # An ordered dictionary representing the token buckets tracked by this rate + # A dictionary representing the token buckets tracked by this rate # limiter. Each entry maps a key of arbitrary type to a tuple representing: # * The number of tokens currently in the bucket, # * The time point when the bucket was last completely empty, and # * The rate_hz (leak rate) of this particular bucket. - self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict() + self.actions: Dict[Hashable, Tuple[float, float, float]] = {} + + self.clock.looping_call(self._prune_message_counts, 60 * 1000) def _get_key( self, requester: Optional[Requester], key: Optional[Hashable] @@ -164,14 +173,25 @@ class Ratelimiter: if override and not override.messages_per_second: return True, -1.0 + if requester and self._ratelimit_callbacks: + # Check if the user has a custom rate limit for this specific limiter + # as returned by the module API. + module_override = ( + await self._ratelimit_callbacks.get_ratelimit_override_for_user( + requester.user.to_string(), + self._limiter_name, + ) + ) + + if module_override: + rate_hz = module_override.per_second + burst_count = module_override.burst_count + # Override default values if set time_now_s = _time_now_s if _time_now_s is not None else self.clock.time() rate_hz = rate_hz if rate_hz is not None else self.rate_hz burst_count = burst_count if burst_count is not None else self.burst_count - # Remove any expired entries - self._prune_message_counts(time_now_s) - # Check if there is an existing count entry for this key action_count, time_start, _ = self._get_action_counts(key, time_now_s) @@ -246,13 +266,12 @@ class Ratelimiter: action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s) self.actions[key] = (action_count + n_actions, time_start, rate_hz) - def _prune_message_counts(self, time_now_s: float) -> None: + def _prune_message_counts(self) -> None: """Remove message count entries that have not exceeded their defined rate_hz limit - - Args: - time_now_s: The current time """ + time_now_s = self.clock.time() + # We create a copy of the key list here as the dictionary is modified during # the loop for key in list(self.actions.keys()): @@ -275,6 +294,7 @@ class Ratelimiter: update: bool = True, n_actions: int = 1, _time_now_s: Optional[float] = None, + pause: Optional[float] = 0.5, ) -> None: """Checks if an action can be performed. If not, raises a LimitExceededError @@ -298,6 +318,8 @@ class Ratelimiter: at all. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. 
Only used by tests. + pause: Time in seconds to pause when an action is being limited. Defaults to 0.5 + to stop clients from "tight-looping" on retrying their request. Raises: LimitExceededError: If an action could not be performed, along with the time in @@ -316,9 +338,8 @@ class Ratelimiter: ) if not allowed: - # We pause for a bit here to stop clients from "tight-looping" on - # retrying their request. - await self.clock.sleep(0.5) + if pause: + await self.clock.sleep(pause) raise LimitExceededError( limiter_name=self._limiter_name, diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
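[Editor's note] The ratelimiting hunk above moves bucket pruning off the request path and onto a once-a-minute `looping_call`, so a quiet key no longer pays for everyone else's cleanup. A compact sketch of the same leaky-bucket bookkeeping, simplified to a single global rate and with `prune()` meant to be driven by an external timer:

```python
import time
from typing import Dict, Hashable, Tuple


class TinyRatelimiter:
    """Leaky bucket: each key may perform `burst_count` actions, refilling
    at `rate_hz` actions per second."""

    def __init__(self, rate_hz: float, burst_count: float) -> None:
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        # key -> (tokens currently in the bucket, time of last update)
        self.actions: Dict[Hashable, Tuple[float, float]] = {}

    def can_do_action(self, key: Hashable) -> bool:
        now = time.monotonic()
        used, last = self.actions.get(key, (0.0, now))
        # Leak tokens that have drained since the last update.
        used = max(0.0, used - (now - last) * self.rate_hz)
        if used + 1.0 > self.burst_count:
            self.actions[key] = (used, now)
            return False
        self.actions[key] = (used + 1.0, now)
        return True

    def prune(self) -> None:
        # Run periodically (the diff uses clock.looping_call): drop buckets
        # that have fully drained so the dict cannot grow without bound.
        now = time.monotonic()
        for key in list(self.actions):
            used, last = self.actions[key]
            if used - (now - last) * self.rate_hz <= 0.0:
                del self.actions[key]
```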
index fbc1d58ecb..697acc25ba 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py
@@ -107,6 +107,8 @@ class RoomVersion: # support the flag. Unknown flags are ignored by the evaluator, making conditions # fail if used. msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag + # MSC3757: Restricting who can overwrite a state event + msc3757_enabled: bool class RoomVersions: @@ -128,6 +130,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V2 = RoomVersion( "2", @@ -147,6 +150,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V3 = RoomVersion( "3", @@ -166,6 +170,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V4 = RoomVersion( "4", @@ -185,6 +190,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V5 = RoomVersion( "5", @@ -204,6 +210,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V6 = RoomVersion( "6", @@ -223,6 +230,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V7 = RoomVersion( "7", @@ -242,6 +250,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V8 = RoomVersion( "8", @@ -261,6 +270,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V9 = RoomVersion( "9", @@ -280,6 +290,7 @@ class RoomVersions: knock_restricted_join_rule=False, enforce_int_power_levels=False, msc3931_push_features=(), + msc3757_enabled=False, ) V10 = RoomVersion( "10", @@ -299,6 +310,7 @@ class RoomVersions: knock_restricted_join_rule=True, enforce_int_power_levels=True, msc3931_push_features=(), + msc3757_enabled=False, ) MSC1767v10 = RoomVersion( # MSC1767 (Extensible Events) based on room version "10" @@ -319,6 +331,28 @@ class RoomVersions: knock_restricted_join_rule=True, enforce_int_power_levels=True, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), + msc3757_enabled=False, + ) + MSC3757v10 = RoomVersion( + # MSC3757 (Restricting who can overwrite a state event) based on room version "10" + "org.matrix.msc3757.10", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + implicit_room_creator=False, + updated_redaction_rules=False, + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, + msc3389_relation_redactions=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, + msc3931_push_features=(), + msc3757_enabled=True, ) V11 = RoomVersion( "11", @@ -338,6 +372,28 @@ class RoomVersions: knock_restricted_join_rule=True, enforce_int_power_levels=True, msc3931_push_features=(), + msc3757_enabled=False, + ) + MSC3757v11 = RoomVersion( + # MSC3757 (Restricting who can overwrite a state event) based on room version "11" + "org.matrix.msc3757.11", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + 
strict_canonicaljson=True, + limit_notifications_power_levels=True, + implicit_room_creator=True, # Used by MSC3820 + updated_redaction_rules=True, # Used by MSC3820 + restricted_join_rule=True, + restricted_join_rule_fix=True, + knock_join_rule=True, + msc3389_relation_redactions=False, + knock_restricted_join_rule=True, + enforce_int_power_levels=True, + msc3931_push_features=(), + msc3757_enabled=True, ) @@ -355,42 +411,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = { RoomVersions.V9, RoomVersions.V10, RoomVersions.V11, - ) -} - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class RoomVersionCapability: - """An object which describes the unique attributes of a room version.""" - - identifier: str # the identifier for this capability - preferred_version: Optional[RoomVersion] - support_check_lambda: Callable[[RoomVersion], bool] - - -MSC3244_CAPABILITIES = { - cap.identifier: { - "preferred": ( - cap.preferred_version.identifier - if cap.preferred_version is not None - else None - ), - "support": [ - v.identifier - for v in KNOWN_ROOM_VERSIONS.values() - if cap.support_check_lambda(v) - ], - } - for cap in ( - RoomVersionCapability( - "knock", - RoomVersions.V7, - lambda room_version: room_version.knock_join_rule, - ), - RoomVersionCapability( - "restricted", - RoomVersions.V9, - lambda room_version: room_version.restricted_join_rule, - ), + RoomVersions.MSC3757v10, + RoomVersions.MSC3757v11, ) } diff --git a/synapse/api/urls.py b/synapse/api/urls.py
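[Editor's note] Adding `msc3757_enabled` above follows the usual `RoomVersion` pattern: every version record carries an explicit flag per capability, and the unstable MSC variants are registered alongside the stable versions. A pared-down sketch of that pattern using a frozen dataclass in place of attrs:

```python
from dataclasses import dataclass
from typing import Dict


@dataclass(frozen=True)
class MiniRoomVersion:
    identifier: str
    stable: bool
    # MSC3757: restricting who can overwrite a state event
    msc3757_enabled: bool


V10 = MiniRoomVersion("10", stable=True, msc3757_enabled=False)
MSC3757V10 = MiniRoomVersion(
    "org.matrix.msc3757.10", stable=False, msc3757_enabled=True
)

KNOWN_ROOM_VERSIONS: Dict[str, MiniRoomVersion] = {
    v.identifier: v for v in (V10, MSC3757V10)
}

# Event-auth code can then branch on the flag rather than on version strings.
assert KNOWN_ROOM_VERSIONS["org.matrix.msc3757.10"].msc3757_enabled
```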
index d077a2c613..655b5edd7a 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py
@@ -19,10 +19,12 @@ # # -"""Contains the URL paths to prefix various aspects of the server with. """ +"""Contains the URL paths to prefix various aspects of the server with.""" + import hmac from hashlib import sha256 -from urllib.parse import urlencode +from typing import Optional +from urllib.parse import urlencode, urljoin from synapse.config import ConfigError from synapse.config.homeserver import HomeServerConfig @@ -65,3 +67,42 @@ class ConsentURIBuilder: urlencode({"u": user_id, "h": mac}), ) return consent_uri + + +class LoginSSORedirectURIBuilder: + def __init__(self, hs_config: HomeServerConfig): + self._public_baseurl = hs_config.server.public_baseurl + + def build_login_sso_redirect_uri( + self, *, idp_id: Optional[str], client_redirect_url: str + ) -> str: + """Build a `/login/sso/redirect` URI for the given identity provider. + + Builds `/_matrix/client/v3/login/sso/redirect/{idpId}?redirectUrl=xxx` when `idp_id` is specified. + Otherwise, builds `/_matrix/client/v3/login/sso/redirect?redirectUrl=xxx` when `idp_id` is `None`. + + Args: + idp_id: Optional ID of the identity provider + client_redirect_url: URL to redirect the user to after login + + Returns + The URI to follow when choosing a specific identity provider. + """ + base_url = urljoin( + self._public_baseurl, + f"{CLIENT_API_PREFIX}/v3/login/sso/redirect", + ) + + serialized_query_parameters = urlencode({"redirectUrl": client_redirect_url}) + + if idp_id: + resultant_url = urljoin( + # We have to add a trailing slash to the base URL to ensure that the + # last path segment is not stripped away when joining with another path. + f"{base_url}/", + f"{idp_id}?{serialized_query_parameters}", + ) + else: + resultant_url = f"{base_url}?{serialized_query_parameters}" + + return resultant_url diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
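[Editor's note] `LoginSSORedirectURIBuilder` above appends a trailing slash before joining the `idp_id` because of how `urljoin` resolves relative references. The behaviour it is working around:

```python
from urllib.parse import urljoin

base = "https://example.com/_matrix/client/v3/login/sso/redirect"

# Without a trailing slash, the final path segment is replaced...
assert urljoin(base, "oidc-github") == (
    "https://example.com/_matrix/client/v3/login/sso/oidc-github"
)
# ...with one, the segment is appended as intended.
assert urljoin(base + "/", "oidc-github") == (
    "https://example.com/_matrix/client/v3/login/sso/redirect/oidc-github"
)
```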
index 53f1859256..75c65ccc0d 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py
@@ -3,7 +3,7 @@ # # Copyright 2020 The Matrix.org Foundation C.I.C. # Copyright 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -51,8 +51,7 @@ from synapse.http.server import JsonResource, OptionsResource from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.rest import ClientRestResource -from synapse.rest.admin import register_servlets_for_media_repo +from synapse.rest import ClientRestResource, admin from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyResource from synapse.rest.synapse.client import build_synapse_client_resource_tree @@ -65,6 +64,7 @@ from synapse.storage.databases.main.appservice import ( ) from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore +from synapse.storage.databases.main.delayed_events import DelayedEventsStore from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore from synapse.storage.databases.main.devices import DeviceWorkerStore from synapse.storage.databases.main.directory import DirectoryWorkerStore @@ -98,6 +98,7 @@ from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore from synapse.storage.databases.main.signatures import SignatureWorkerStore +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.stream import StreamWorkerStore @@ -159,6 +160,8 @@ class GenericWorkerStore( SessionStore, TaskSchedulerWorkerStore, ExperimentalFeaturesStore, + SlidingSyncStore, + DelayedEventsStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. @@ -172,8 +175,13 @@ class GenericWorkerServer(HomeServer): def _listen_http(self, listener_config: ListenerConfig) -> None: assert listener_config.http_options is not None - # We always include a health resource. - resources: Dict[str, Resource] = {"/health": HealthResource()} + # We always include an admin resource that we populate with servlets as needed + admin_resource = JsonResource(self, canonical_json=False) + resources: Dict[str, Resource] = { + # We always include a health resource. + "/health": HealthResource(), + "/_synapse/admin": admin_resource, + } for res in listener_config.http_options.resources: for name in res.names: @@ -186,6 +194,7 @@ class GenericWorkerServer(HomeServer): resources.update(build_synapse_client_resource_tree(self)) resources["/.well-known"] = well_known_resource(self) + admin.register_servlets(self, admin_resource) elif name == "federation": resources[FEDERATION_PREFIX] = TransportLayerServer(self) @@ -195,15 +204,13 @@ class GenericWorkerServer(HomeServer): # We need to serve the admin servlets for media on the # worker. 
- admin_resource = JsonResource(self, canonical_json=False) - register_servlets_for_media_repo(self, admin_resource) + admin.register_servlets_for_media_repo(self, admin_resource) resources.update( { MEDIA_R0_PREFIX: media_repo, MEDIA_V3_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo, - "/_synapse/admin": admin_resource, } ) @@ -280,8 +287,7 @@ class GenericWorkerServer(HomeServer): elif listener.type == "metrics": if not self.config.metrics.enable_metrics: logger.warning( - "Metrics listener configured, but " - "enable_metrics is not True!" + "Metrics listener configured, but enable_metrics is not True!" ) else: if isinstance(listener, TCPListenerConfig): diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
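[Editor's note] The worker hunk above now always mounts an admin `JsonResource` and registers servlets into it per listener, rather than building it ad hoc for media. A generic twisted.web sketch of the underlying "dict of path prefixes mapped onto a resource tree" wiring (Synapse's `JsonResource` and servlet registration are elided):

```python
from twisted.web.resource import Resource
from twisted.web.server import Site


class Health(Resource):
    isLeaf = True

    def render_GET(self, request):
        return b"OK"


root = Resource()
# One resource per top-level prefix, mirroring the `resources` dict above.
resources = {b"health": Health(), b"_synapse": Resource()}
for prefix, res in resources.items():
    root.putChild(prefix, res)

site = Site(root)
# from twisted.internet import reactor
# reactor.listenTCP(8080, site)  # uncomment to actually serve
# reactor.run()
```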
index 2a824e8457..9b5ecf2c68 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py
@@ -54,6 +54,7 @@ from synapse.config.server import ListenerConfig, TCPListenerConfig from synapse.federation.transport.server import TransportLayerServer from synapse.http.additional_resource import AdditionalResource from synapse.http.server import ( + JsonResource, OptionsResource, RootOptionsRedirectResource, StaticResource, @@ -61,8 +62,7 @@ from synapse.http.server import ( from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.rest import ClientRestResource -from synapse.rest.admin import AdminRestResource +from synapse.rest import ClientRestResource, admin from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyResource from synapse.rest.synapse.client import build_synapse_client_resource_tree @@ -180,24 +180,18 @@ class SynapseHomeServer(HomeServer): if compress: client_resource = gz_wrap(client_resource) + admin_resource = JsonResource(self, canonical_json=False) + admin.register_servlets(self, admin_resource) + resources.update( { CLIENT_API_PREFIX: client_resource, "/.well-known": well_known_resource(self), - "/_synapse/admin": AdminRestResource(self), + "/_synapse/admin": admin_resource, **build_synapse_client_resource_tree(self), } ) - if self.config.email.can_verify_email: - from synapse.rest.synapse.client.password_reset import ( - PasswordResetSubmitTokenResource, - ) - - resources["/_synapse/client/password_reset/email/submit_token"] = ( - PasswordResetSubmitTokenResource(self) - ) - if name == "consent": from synapse.rest.consent.consent_resource import ConsentResource @@ -286,8 +280,7 @@ class SynapseHomeServer(HomeServer): elif listener.type == "metrics": if not self.config.metrics.enable_metrics: logger.warning( - "Metrics listener configured, but " - "enable_metrics is not True!" + "Metrics listener configured, but enable_metrics is not True!" ) else: if isinstance(listener, TCPListenerConfig): @@ -349,12 +342,11 @@ def setup(config_options: List[str]) -> SynapseHomeServer: ): if ( not config.captcha.enable_registration_captcha - and not config.registration.registrations_require_3pid and not config.registration.registration_requires_token ): raise ConfigError( "You have enabled open registration without any verification. This is a known vector for " - "spam and abuse. If you would like to allow public registration, please consider adding email, " + "spam and abuse. If you would like to allow public registration, please consider adding " "captcha, or token-based verification. Otherwise this check can be removed by setting the " "`enable_registration_without_verification` config option to `true`." ) diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py
index f602bbbeea..07870a16ee 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py
@@ -34,6 +34,22 @@ if TYPE_CHECKING: logger = logging.getLogger("synapse.app.homeserver") +ONE_MINUTE_SECONDS = 60 +ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS + +MILLISECONDS_PER_SECOND = 1000 + +INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS +""" +We wait 5 minutes to send the first set of stats as the server can be quite busy the +first few minutes +""" + +PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS +""" +Phone home stats are sent every 3 hours +""" + # Contains the list of processes we will be monitoring # currently either 0 or 1 _stats_process: List[Tuple[int, "resource.struct_rusage"]] = [] @@ -46,10 +62,6 @@ current_mau_by_service_gauge = Gauge( ["app_service"], ) max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit") -registered_reserved_users_mau_gauge = Gauge( - "synapse_admin_mau_registered_reserved_users", - "Registered users with reserved threepids", -) @wrap_as_background_process("phone_stats_home") @@ -185,12 +197,14 @@ def start_phone_stats_home(hs: "HomeServer") -> None: # If you increase the loop period, the accuracy of user_daily_visits # table will decrease clock.looping_call( - hs.get_datastores().main.generate_user_daily_visits, 5 * 60 * 1000 + hs.get_datastores().main.generate_user_daily_visits, + 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND, ) # monthly active user limiting functionality clock.looping_call( - hs.get_datastores().main.reap_monthly_active_users, 1000 * 60 * 60 + hs.get_datastores().main.reap_monthly_active_users, + ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND, ) hs.get_datastores().main.reap_monthly_active_users() @@ -198,20 +212,17 @@ def start_phone_stats_home(hs: "HomeServer") -> None: async def generate_monthly_active_users() -> None: current_mau_count = 0 current_mau_count_by_service: Mapping[str, int] = {} - reserved_users: Sized = () store = hs.get_datastores().main if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: current_mau_count = await store.get_monthly_active_count() current_mau_count_by_service = ( await store.get_monthly_active_count_by_service() ) - reserved_users = await store.get_registered_reserved_users() current_mau_gauge.set(float(current_mau_count)) for app_service, count in current_mau_count_by_service.items(): current_mau_by_service_gauge.labels(app_service).set(float(count)) - registered_reserved_users_mau_gauge.set(float(len(reserved_users))) max_mau_gauge.set(float(hs.config.server.max_mau_value)) if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: @@ -221,7 +232,12 @@ def start_phone_stats_home(hs: "HomeServer") -> None: if hs.config.metrics.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats) + clock.looping_call( + phone_stats_home, + PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND, + hs, + stats, + ) # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process @@ -229,4 +245,6 @@ def start_phone_stats_home(hs: "HomeServer") -> None: # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes - clock.call_later(5 * 60, phone_stats_home, hs, stats) + clock.call_later( + INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, phone_stats_home, hs, stats + ) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index a96cdbf1e7..6ee5240c4e 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py
@@ -87,6 +87,7 @@ class ApplicationService: ip_range_whitelist: Optional[IPSet] = None, supports_ephemeral: bool = False, msc3202_transaction_extensions: bool = False, + msc4190_device_management: bool = False, ): self.token = token self.url = ( @@ -100,6 +101,7 @@ class ApplicationService: self.ip_range_whitelist = ip_range_whitelist self.supports_ephemeral = supports_ephemeral self.msc3202_transaction_extensions = msc3202_transaction_extensions + self.msc4190_device_management = msc4190_device_management if "|" in self.id: raise Exception("application service ID cannot contain '|' character") diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index bec83419a2..cba08dde85 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2015, 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023, 2025 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -54,6 +54,7 @@ UP & quit +---------- YES SUCCESS This is all tied together by the AppServiceScheduler which DIs the required components. """ + import logging from typing import ( TYPE_CHECKING, @@ -69,6 +70,8 @@ from typing import ( Tuple, ) +from twisted.internet.interfaces import IDelayedCall + from synapse.appservice import ( ApplicationService, ApplicationServiceState, @@ -449,6 +452,20 @@ class _TransactionController: recoverer.recover() logger.info("Now %i active recoverers", len(self.recoverers)) + def force_retry(self, service: ApplicationService) -> None: + """Forces a Recoverer to attempt delivery of transactions immediately. + + Args: + service: The application service whose pending transactions should be retried. + """ + recoverer = self.recoverers.get(service.id) + if not recoverer: + # No need to force a retry on a happy AS. + logger.info(f"{service.id} is not in recovery, not forcing retry") + return + + recoverer.force_retry() + async def _is_service_up(self, service: ApplicationService) -> bool: state = await self.store.get_appservice_state(service) return state == ApplicationServiceState.UP or state is None @@ -481,11 +498,12 @@ class _Recoverer: self.service = service self.callback = callback self.backoff_counter = 1 + self.scheduled_recovery: Optional[IDelayedCall] = None def recover(self) -> None: delay = 2**self.backoff_counter logger.info("Scheduling retries on %s in %fs", self.service.id, delay) - self.clock.call_later( + self.scheduled_recovery = self.clock.call_later( delay, run_as_background_process, "as-recoverer", self.retry ) @@ -495,6 +513,21 @@ class _Recoverer: self.backoff_counter += 1 self.recover() + def force_retry(self) -> None: + """Cancels the existing timer and forces an immediate retry in the background.""" + # Prevent the existing backoff from occurring + if self.scheduled_recovery: + self.clock.cancel_call_later(self.scheduled_recovery) + # Run a retry, which will reschedule a recovery if it fails. + run_as_background_process( + "retry", + self.retry, + ) + async def retry(self) -> None: logger.info("Starting retries on %s", self.service.id) try: diff --git a/synapse/config/_base.py b/synapse/config/_base.py
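To make the recovery behaviour concrete, a small standalone sketch (not Synapse code) of the exponential backoff that force_retry() short-circuits; each failed retry doubles the delay, while force_retry() cancels the pending timer and runs the retry immediately:

    backoff_counter = 1
    delays = []
    for _ in range(5):
        delays.append(2**backoff_counter)  # seconds until the next scheduled retry
        backoff_counter += 1
    print(delays)  # [2, 4, 8, 16, 32]

On failure the real retry() increments backoff_counter and calls recover() again, which is what produces the doubling above.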
index adce34c03a..d367d45fea 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py
@@ -170,7 +170,7 @@ class Config: section: ClassVar[str] - def __init__(self, root_config: "RootConfig" = None): + def __init__(self, root_config: "RootConfig"): self.root = root_config # Get the path to the default Synapse template directory @@ -221,9 +221,13 @@ class Config: The number of milliseconds in the duration. Raises: - TypeError, if given something other than an integer or a string + TypeError: if given something other than an integer or a string, or the + duration is using an incorrect suffix. ValueError: if given a string not of the form described above. """ + # For integers, we prefer to use `type(value) is int` instead of + # `isinstance(value, int)` because we want to exclude subclasses of int, such as + # bool. if type(value) is int: # noqa: E721 return value elif isinstance(value, str): @@ -246,9 +250,20 @@ class Config: if suffix in sizes: value = value[:-1] size = sizes[suffix] + elif suffix.isdigit(): + # No suffix is treated as milliseconds. + value = value + size = 1 + else: + raise TypeError( + f"Bad duration suffix {value} (expected no suffix or one of these suffixes: {sizes.keys()})" + ) + return int(value) * size else: - raise TypeError(f"Bad duration {value!r}") + raise TypeError( + f"Bad duration type {value!r} (expected int or string duration)" + ) @staticmethod def abspath(file_path: str) -> str: @@ -430,7 +445,7 @@ class RootConfig: return res @classmethod - def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: any) -> None: + def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: Any) -> None: """ Invoke a static function on config objects this RootConfig is configured to use. @@ -574,6 +589,14 @@ class RootConfig: " Defaults to the directory containing the last config file", ) + config_parser.add_argument( + "--no-secrets-in-config", + dest="secrets_in_config", + action="store_false", + default=True, + help="Reject config options that expect an in-line secret as value.", + ) + cls.invoke_all_static("add_arguments", config_parser) @classmethod @@ -611,7 +634,10 @@ class RootConfig: config_dict = read_config_files(config_files) obj.parse_config_dict( - config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path + config_dict, + config_dir_path=config_dir_path, + data_dir_path=data_dir_path, + allow_secrets_in_config=config_args.secrets_in_config, ) obj.invoke_all("read_arguments", config_args) @@ -638,6 +664,13 @@ class RootConfig: help="Specify config file. Can be given multiple times and" " may specify directories containing *.yaml files.", ) + parser.add_argument( + "--no-secrets-in-config", + dest="secrets_in_config", + action="store_false", + default=True, + help="Reject config options that expect an in-line secret as value.", + ) # we nest the mutually-exclusive group inside another group so that the help # text shows them in their own group. @@ -806,14 +839,21 @@ class RootConfig: return None obj.parse_config_dict( - config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path + config_dict, + config_dir_path=config_dir_path, + data_dir_path=data_dir_path, + allow_secrets_in_config=config_args.secrets_in_config, ) obj.invoke_all("read_arguments", config_args) return obj def parse_config_dict( - self, config_dict: Dict[str, Any], config_dir_path: str, data_dir_path: str + self, + config_dict: Dict[str, Any], + config_dir_path: str, + data_dir_path: str, + allow_secrets_in_config: bool = True, ) -> None: """Read the information from the config dict into this Config object. 
@@ -831,6 +871,7 @@ class RootConfig: config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path, + allow_secrets_in_config=allow_secrets_in_config, ) def generate_missing_files( @@ -1006,7 +1047,7 @@ class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig): return self._get_instance(key) -def read_file(file_path: Any, config_path: Iterable[str]) -> str: +def read_file(file_path: Any, config_path: StrSequence) -> str: """Check the given file exists, and read it into a string If it does not, emit an error indicating the problem diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
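A behaviour sketch of Config.parse_duration after the change above; the suffix table is assumed to contain at least "s", "m" and "h", and all results are milliseconds:

    from synapse.config._base import Config

    Config.parse_duration("1h")   # -> 3600000
    Config.parse_duration("30s")  # -> 30000
    Config.parse_duration("500")  # -> 500: bare digits are already milliseconds
    Config.parse_duration(250)    # -> 250: ints pass through (bools are rejected)
    # Config.parse_duration("2x")  # raises TypeError: bad duration suffix
    # Config.parse_duration(None)  # raises TypeError: bad duration type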
index d9cb0da38b..baac814808 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi
@@ -30,7 +30,6 @@ from synapse.config import ( # noqa: F401 cas, consent, database, - emailconfig, experimental, federation, jwt, @@ -49,7 +48,6 @@ from synapse.config import ( # noqa: F401 retention, room, room_directory, - saml2, server, server_notices, spam_checker, @@ -59,6 +57,7 @@ from synapse.config import ( # noqa: F401 tls, tracer, user_directory, + user_types, voip, workers, ) @@ -96,13 +95,10 @@ class RootConfig: api: api.ApiConfig appservice: appservice.AppServiceConfig key: key.KeyConfig - saml2: saml2.SAML2Config - cas: cas.CasConfig sso: sso.SSOConfig oidc: oidc.OIDCConfig jwt: jwt.JWTConfig auth: auth.AuthConfig - email: emailconfig.EmailConfig worker: workers.WorkerConfig authproviders: password_auth_providers.PasswordAuthProviderConfig push: push.PushConfig @@ -122,6 +118,7 @@ class RootConfig: retention: retention.RetentionConfig background_updates: background_updates.BackgroundUpdateConfig auto_accept_invites: auto_accept_invites.AutoAcceptInvitesConfig + user_types: user_types.UserTypesConfig config_classes: List[Type["Config"]] = ... config_files: List[str] @@ -132,7 +129,11 @@ class RootConfig: @classmethod def invoke_all_static(cls, func_name: str, *args: Any, **kwargs: Any) -> None: ... def parse_config_dict( - self, config_dict: Dict[str, Any], config_dir_path: str, data_dir_path: str + self, + config_dict: Dict[str, Any], + config_dir_path: str, + data_dir_path: str, + allow_secrets_in_config: bool = ..., ) -> None: ... def generate_config( self, @@ -175,7 +176,7 @@ class RootConfig: class Config: root: RootConfig default_template_dir: str - def __init__(self, root_config: Optional[RootConfig] = ...) -> None: ... + def __init__(self, root_config: RootConfig = ...) -> None: ... @staticmethod def parse_size(value: Union[str, int]) -> int: ... @staticmethod @@ -208,4 +209,4 @@ class ShardedWorkerHandlingConfig: class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig): def get_instance(self, key: str) -> str: ... # noqa: F811 -def read_file(file_path: Any, config_path: Iterable[str]) -> str: ... +def read_file(file_path: Any, config_path: StrSequence) -> str: ... diff --git a/synapse/config/_util.py b/synapse/config/_util.py
index 32b906a1ec..731b60a840 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py
@@ -18,17 +18,11 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar +from typing import Any, Dict, Type, TypeVar import jsonschema -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, ValidationError, parse_obj_as -else: - from pydantic import BaseModel, ValidationError, parse_obj_as - +from synapse._pydantic_compat import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError from synapse.types import JsonDict, StrSequence diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py
index 6ff00e1ff8..dda6bcd1b7 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py
@@ -183,6 +183,18 @@ def _load_appservice( "The `org.matrix.msc3202` option should be true or false if specified." ) + # Opt-in flag for the MSC4190 behaviours. + # When enabled, the following C-S API endpoints change for appservices: + # - POST /register does not return an access token + # - PUT /devices/{device_id} creates a new device if one does not exist + # - DELETE /devices/{device_id} no longer requires UIA + # - POST /delete_devices no longer requires UIA + msc4190_enabled = as_info.get("io.element.msc4190", False) + if not isinstance(msc4190_enabled, bool): + raise ValueError( + "The `io.element.msc4190` option should be true or false if specified." + ) + return ApplicationService( token=as_info["as_token"], url=as_info["url"], @@ -195,4 +207,5 @@ ip_range_whitelist=ip_range_whitelist, supports_ephemeral=supports_ephemeral, msc3202_transaction_extensions=msc3202_transaction_extensions, + msc4190_device_management=msc4190_enabled, ) diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
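For illustration, a hypothetical appservice registration that opts in to the MSC4190 behaviour, shown as the parsed as_info dict rather than the usual YAML file; every field except io.element.msc4190 follows the pre-existing registration format, and all values here are invented:

    as_info = {
        "id": "my_bridge",                       # hypothetical values throughout
        "url": "http://localhost:9000",
        "as_token": "...",
        "hs_token": "...",
        "sender_localpart": "bridge_bot",
        "namespaces": {"users": [], "aliases": [], "rooms": []},
        "io.element.msc4190": True,              # must be a bool, else ValueError
    }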
index 84897c09c5..57d67abbc3 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py
@@ -29,8 +29,15 @@ from ._base import Config, ConfigError class CaptchaConfig(Config): section = "captcha" - def read_config(self, config: JsonDict, **kwargs: Any) -> None: + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: recaptcha_private_key = config.get("recaptcha_private_key") + if recaptcha_private_key and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("recaptcha_private_key",), + ) if recaptcha_private_key is not None and not isinstance( recaptcha_private_key, str ): @@ -38,6 +45,11 @@ class CaptchaConfig(Config): self.recaptcha_private_key = recaptcha_private_key recaptcha_public_key = config.get("recaptcha_public_key") + if recaptcha_public_key and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("recaptcha_public_key",), + ) if recaptcha_public_key is not None and not isinstance( recaptcha_public_key, str ): diff --git a/synapse/config/cas.py b/synapse/config/cas.py deleted file mode 100644
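The allow_secrets_in_config flag consumed here is wired up by the --no-secrets-in-config argument added to _base.py above; a minimal, self-contained sketch of that argparse pattern:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--no-secrets-in-config",
        dest="secrets_in_config",
        action="store_false",
        default=True,
        help="Reject config options that expect an in-line secret as value.",
    )
    print(parser.parse_args([]).secrets_in_config)                          # True
    print(parser.parse_args(["--no-secrets-in-config"]).secrets_in_config)  # False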
index fa59c350c1..0000000000 --- a/synapse/config/cas.py +++ /dev/null
@@ -1,111 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2021 The Matrix.org Foundation C.I.C. -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import Any, List - -from synapse.config.sso import SsoAttributeRequirement -from synapse.types import JsonDict - -from ._base import Config, ConfigError -from ._util import validate_config - - -class CasConfig(Config): - """Cas Configuration - - cas_server_url: URL of CAS server - """ - - section = "cas" - - def read_config(self, config: JsonDict, **kwargs: Any) -> None: - cas_config = config.get("cas_config", None) - self.cas_enabled = cas_config and cas_config.get("enabled", True) - - if self.cas_enabled: - self.cas_server_url = cas_config["server_url"] - - # TODO Update this to a _synapse URL. - public_baseurl = self.root.server.public_baseurl - self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket" - - self.cas_protocol_version = cas_config.get("protocol_version") - if ( - self.cas_protocol_version is not None - and self.cas_protocol_version not in [1, 2, 3] - ): - raise ConfigError( - "Unsupported CAS protocol version %s (only versions 1, 2, 3 are supported)" - % (self.cas_protocol_version,), - ("cas_config", "protocol_version"), - ) - self.cas_displayname_attribute = cas_config.get("displayname_attribute") - required_attributes = cas_config.get("required_attributes") or {} - self.cas_required_attributes = _parsed_required_attributes_def( - required_attributes - ) - - self.cas_enable_registration = cas_config.get("enable_registration", True) - - self.cas_allow_numeric_ids = cas_config.get("allow_numeric_ids") - self.cas_numeric_ids_prefix = cas_config.get("numeric_ids_prefix") - if ( - self.cas_numeric_ids_prefix is not None - and self.cas_numeric_ids_prefix.isalnum() is False - ): - raise ConfigError( - "Only alphanumeric characters are allowed for numeric IDs prefix", - ("cas_config", "numeric_ids_prefix"), - ) - - self.idp_name = cas_config.get("idp_name", "CAS") - self.idp_icon = cas_config.get("idp_icon") - self.idp_brand = cas_config.get("idp_brand") - - else: - self.cas_server_url = None - self.cas_service_url = None - self.cas_protocol_version = None - self.cas_displayname_attribute = None - self.cas_required_attributes = [] - self.cas_enable_registration = False - self.cas_allow_numeric_ids = False - self.cas_numeric_ids_prefix = "u" - - -# CAS uses a legacy required attributes mapping, not the one provided by -# SsoAttributeRequirement. 
-REQUIRED_ATTRIBUTES_SCHEMA = { - "type": "object", - "additionalProperties": {"anyOf": [{"type": "string"}, {"type": "null"}]}, -} - - -def _parsed_required_attributes_def( - required_attributes: Any, -) -> List[SsoAttributeRequirement]: - validate_config( - REQUIRED_ATTRIBUTES_SCHEMA, - required_attributes, - config_path=("cas_config", "required_attributes"), - ) - return [SsoAttributeRequirement(k, v) for k, v in required_attributes.items()] diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py deleted file mode 100644
index 8033fa2e52..0000000000 --- a/synapse/config/emailconfig.py +++ /dev/null
@@ -1,366 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2019 The Matrix.org Foundation C.I.C. -# Copyright 2015-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -# This file can't be called email.py because if it is, we cannot: -import email.utils -import logging -import os -from typing import Any - -import attr - -from synapse.types import JsonDict - -from ._base import Config, ConfigError - -logger = logging.getLogger(__name__) - -MISSING_PASSWORD_RESET_CONFIG_ERROR = """\ -Password reset emails are enabled on this homeserver due to a partial -'email' block. However, the following required keys are missing: - %s -""" - -DEFAULT_SUBJECTS = { - "message_from_person_in_room": "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room...", - "message_from_person": "[%(app)s] You have a message on %(app)s from %(person)s...", - "messages_from_person": "[%(app)s] You have messages on %(app)s from %(person)s...", - "messages_in_room": "[%(app)s] You have messages on %(app)s in the %(room)s room...", - "messages_in_room_and_others": "[%(app)s] You have messages on %(app)s in the %(room)s room and others...", - "messages_from_person_and_others": "[%(app)s] You have messages on %(app)s from %(person)s and others...", - "invite_from_person": "[%(app)s] %(person)s has invited you to chat on %(app)s...", - "invite_from_person_to_room": "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s...", - "invite_from_person_to_space": "[%(app)s] %(person)s has invited you to join the %(space)s space on %(app)s...", - "password_reset": "[%(server_name)s] Password reset", - "email_validation": "[%(server_name)s] Validate your email", - "email_already_in_use": "[%(server_name)s] Email already in use", -} - -LEGACY_TEMPLATE_DIR_WARNING = """ -This server's configuration file is using the deprecated 'template_dir' setting in the -'email' section. Support for this setting has been deprecated and will be removed in a -future version of Synapse. Server admins should instead use the new -'custom_template_directory' setting documented here: -https://element-hq.github.io/synapse/latest/templates.html ----------------------------------------------------------------------------------------""" - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class EmailSubjectConfig: - message_from_person_in_room: str - message_from_person: str - messages_from_person: str - messages_in_room: str - messages_in_room_and_others: str - messages_from_person_and_others: str - invite_from_person: str - invite_from_person_to_room: str - invite_from_person_to_space: str - password_reset: str - email_validation: str - email_already_in_use: str - - -class EmailConfig(Config): - section = "email" - - def read_config(self, config: JsonDict, **kwargs: Any) -> None: - # TODO: We should separate better the email configuration from the notification - # and account validity config. 
- - self.email_enable_notifs = False - - email_config = config.get("email") - if email_config is None: - email_config = {} - - self.force_tls = email_config.get("force_tls", False) - self.email_smtp_host = email_config.get("smtp_host", "localhost") - self.email_smtp_port = email_config.get( - "smtp_port", 465 if self.force_tls else 25 - ) - self.email_smtp_user = email_config.get("smtp_user", None) - self.email_smtp_pass = email_config.get("smtp_pass", None) - self.require_transport_security = email_config.get( - "require_transport_security", False - ) - self.enable_smtp_tls = email_config.get("enable_tls", True) - if self.force_tls and not self.enable_smtp_tls: - raise ConfigError("email.force_tls requires email.enable_tls to be true") - if self.require_transport_security and not self.enable_smtp_tls: - raise ConfigError( - "email.require_transport_security requires email.enable_tls to be true" - ) - - if "app_name" in email_config: - self.email_app_name = email_config["app_name"] - else: - self.email_app_name = "Matrix" - - # TODO: Rename notif_from to something more generic, or have a separate - # from for password resets, message notifications, etc? - # Currently the email section is a bit bogged down with settings for - # multiple functions. Would be good to split it out into separate - # sections and only put the common ones under email: - self.email_notif_from = email_config.get("notif_from", None) - if self.email_notif_from is not None: - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if parsed[1] == "": - raise RuntimeError("Invalid notif_from address") - - # A user-configurable template directory - template_dir = email_config.get("template_dir") - if template_dir is not None: - logger.warning(LEGACY_TEMPLATE_DIR_WARNING) - - if isinstance(template_dir, str): - # We need an absolute path, because we change directory after starting (and - # we don't yet know what auxiliary templates like mail.css we will need). - template_dir = os.path.abspath(template_dir) - elif template_dir is not None: - # If template_dir is something other than a str or None, warn the user - raise ConfigError("Config option email.template_dir must be type str") - - self.email_enable_notifs = email_config.get("enable_notifs", False) - - if config.get("trust_identity_server_for_password_resets"): - raise ConfigError( - 'The config option "trust_identity_server_for_password_resets" ' - "is no longer supported. Please remove it from the config file." - ) - - # If we have email config settings, assume that we can verify ownership of - # email addresses. 
- self.can_verify_email = email_config != {} - - # Get lifetime of a validation token in milliseconds - self.email_validation_token_lifetime = self.parse_duration( - email_config.get("validation_token_lifetime", "1h") - ) - - if self.can_verify_email: - missing = [] - if not self.email_notif_from: - missing.append("email.notif_from") - - if missing: - raise ConfigError( - MISSING_PASSWORD_RESET_CONFIG_ERROR % (", ".join(missing),) - ) - - # These email templates have placeholders in them, and thus must be - # parsed using a templating engine during a request - password_reset_template_html = email_config.get( - "password_reset_template_html", "password_reset.html" - ) - password_reset_template_text = email_config.get( - "password_reset_template_text", "password_reset.txt" - ) - registration_template_html = email_config.get( - "registration_template_html", "registration.html" - ) - registration_template_text = email_config.get( - "registration_template_text", "registration.txt" - ) - already_in_use_template_html = email_config.get( - "already_in_use_template_html", "already_in_use.html" - ) - already_in_use_template_text = email_config.get( - "already_in_use_template_html", "already_in_use.txt" - ) - add_threepid_template_html = email_config.get( - "add_threepid_template_html", "add_threepid.html" - ) - add_threepid_template_text = email_config.get( - "add_threepid_template_text", "add_threepid.txt" - ) - - password_reset_template_failure_html = email_config.get( - "password_reset_template_failure_html", "password_reset_failure.html" - ) - registration_template_failure_html = email_config.get( - "registration_template_failure_html", "registration_failure.html" - ) - add_threepid_template_failure_html = email_config.get( - "add_threepid_template_failure_html", "add_threepid_failure.html" - ) - - # These templates do not support any placeholder variables, so we - # will read them from disk once during setup - password_reset_template_success_html = email_config.get( - "password_reset_template_success_html", "password_reset_success.html" - ) - registration_template_success_html = email_config.get( - "registration_template_success_html", "registration_success.html" - ) - add_threepid_template_success_html = email_config.get( - "add_threepid_template_success_html", "add_threepid_success.html" - ) - - # Read all templates from disk - ( - self.email_password_reset_template_html, - self.email_password_reset_template_text, - self.email_registration_template_html, - self.email_registration_template_text, - self.email_already_in_use_template_html, - self.email_already_in_use_template_text, - self.email_add_threepid_template_html, - self.email_add_threepid_template_text, - self.email_password_reset_template_confirmation_html, - self.email_password_reset_template_failure_html, - self.email_registration_template_failure_html, - self.email_add_threepid_template_failure_html, - password_reset_template_success_html_template, - registration_template_success_html_template, - add_threepid_template_success_html_template, - ) = self.read_templates( - [ - password_reset_template_html, - password_reset_template_text, - registration_template_html, - registration_template_text, - already_in_use_template_html, - already_in_use_template_text, - add_threepid_template_html, - add_threepid_template_text, - "password_reset_confirmation.html", - password_reset_template_failure_html, - registration_template_failure_html, - add_threepid_template_failure_html, - password_reset_template_success_html, - 
registration_template_success_html, - add_threepid_template_success_html, - ], - ( - td - for td in ( - self.root.server.custom_template_directory, - template_dir, - ) - if td - ), # Filter out template_dir if not provided - ) - - # Render templates that do not contain any placeholders - self.email_password_reset_template_success_html_content = ( - password_reset_template_success_html_template.render() - ) - self.email_registration_template_success_html_content = ( - registration_template_success_html_template.render() - ) - self.email_add_threepid_template_success_html_content = ( - add_threepid_template_success_html_template.render() - ) - - if self.email_enable_notifs: - missing = [] - if not self.email_notif_from: - missing.append("email.notif_from") - - if missing: - raise ConfigError( - "email.enable_notifs is True but required keys are missing: %s" - % (", ".join(missing),) - ) - - notif_template_html = email_config.get( - "notif_template_html", "notif_mail.html" - ) - notif_template_text = email_config.get( - "notif_template_text", "notif_mail.txt" - ) - - ( - self.email_notif_template_html, - self.email_notif_template_text, - ) = self.read_templates( - [notif_template_html, notif_template_text], - ( - td - for td in ( - self.root.server.custom_template_directory, - template_dir, - ) - if td - ), # Filter out template_dir if not provided - ) - - self.email_notif_for_new_users = email_config.get( - "notif_for_new_users", True - ) - self.email_riot_base_url = email_config.get( - "client_base_url", email_config.get("riot_base_url", None) - ) - # The amount of time we always wait before ever emailing about a notification - # (to give the user a chance to respond to other push or notice the window) - self.notif_delay_before_mail_ms = Config.parse_duration( - email_config.get("notif_delay_before_mail", "10m") - ) - - if self.root.account_validity.account_validity_renew_by_email_enabled: - expiry_template_html = email_config.get( - "expiry_template_html", "notice_expiry.html" - ) - expiry_template_text = email_config.get( - "expiry_template_text", "notice_expiry.txt" - ) - - ( - self.account_validity_template_html, - self.account_validity_template_text, - ) = self.read_templates( - [expiry_template_html, expiry_template_text], - ( - td - for td in ( - self.root.server.custom_template_directory, - template_dir, - ) - if td - ), # Filter out template_dir if not provided - ) - - subjects_config = email_config.get("subjects", {}) - subjects = {} - - for key, default in DEFAULT_SUBJECTS.items(): - subjects[key] = subjects_config.get(key, default) - - self.email_subjects = EmailSubjectConfig(**subjects) - - # The invite client location should be a HTTP(S) URL or None. - self.invite_client_location = email_config.get("invite_client_location") or None - if self.invite_client_location: - if not isinstance(self.invite_client_location, str): - raise ConfigError( - "Config option email.invite_client_location must be type str" - ) - if not ( - self.invite_client_location.startswith("http://") - or self.invite_client_location.startswith("https://") - ): - raise ConfigError( - "Config option email.invite_client_location must be a http or https URL", - path=("email", "invite_client_location"), - ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index bae9cc8047..881aafc3f0 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py
@@ -20,6 +20,7 @@ # import enum +from functools import cache from typing import TYPE_CHECKING, Any, Optional import attr @@ -27,8 +28,8 @@ import attr.validators from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config import ConfigError -from synapse.config._base import Config, RootConfig -from synapse.types import JsonDict +from synapse.config._base import Config, RootConfig, read_file +from synapse.types import JsonDict, StrSequence # Determine whether authlib is installed. try: @@ -43,6 +44,12 @@ if TYPE_CHECKING: from authlib.jose.rfc7517 import JsonWebKey +@cache +def read_secret_from_file_once(file_path: Any, config_path: StrSequence) -> str: + """Returns the memoized secret read from file.""" + return read_file(file_path, config_path).strip() + + class ClientAuthMethod(enum.Enum): """List of supported client auth methods.""" @@ -63,6 +70,40 @@ def _parse_jwks(jwks: Optional[JsonDict]) -> Optional["JsonWebKey"]: return JsonWebKey.import_key(jwks) +def _check_client_secret( + instance: "MSC3861", _attribute: attr.Attribute, _value: Optional[str] +) -> None: + if instance._client_secret and instance._client_secret_path: + raise ConfigError( + ( + "You have configured both " + "`experimental_features.msc3861.client_secret` and " + "`experimental_features.msc3861.client_secret_path`. " + "These are mutually incompatible." + ), + ("experimental", "msc3861", "client_secret"), + ) + # Check client secret can be retrieved + instance.client_secret() + + +def _check_admin_token( + instance: "MSC3861", _attribute: attr.Attribute, _value: Optional[str] +) -> None: + if instance._admin_token and instance._admin_token_path: + raise ConfigError( + ( + "You have configured both " + "`experimental_features.msc3861.admin_token` and " + "`experimental_features.msc3861.admin_token_path`. " + "These are mutually incompatible." + ), + ("experimental", "msc3861", "admin_token"), + ) + # Check the admin token can be retrieved + instance.admin_token() + + @attr.s(slots=True, frozen=True) class MSC3861: """Configuration for MSC3861: Matrix architecture change to delegate authentication via OIDC""" @@ -97,15 +138,30 @@ class MSC3861: ) """The auth method used when calling the introspection endpoint.""" - client_secret: Optional[str] = attr.ib( + _client_secret: Optional[str] = attr.ib( default=None, - validator=attr.validators.optional(attr.validators.instance_of(str)), + validator=[ + attr.validators.optional(attr.validators.instance_of(str)), + _check_client_secret, + ], ) """ The client secret to use when calling the introspection endpoint, when using any of the client_secret_* client auth methods. """ + _client_secret_path: Optional[str] = attr.ib( + default=None, + validator=[ + attr.validators.optional(attr.validators.instance_of(str)), + _check_client_secret, + ], + ) + """ + Alternative to `client_secret`: allows the secret to be specified in an + external file. 
+ """ + jwk: Optional["JsonWebKey"] = attr.ib(default=None, converter=_parse_jwks) """ The JWKS to use when calling the introspection endpoint, @@ -133,7 +189,7 @@ class MSC3861: ClientAuthMethod.CLIENT_SECRET_BASIC, ClientAuthMethod.CLIENT_SECRET_JWT, ) - and self.client_secret is None + and self.client_secret() is None ): raise ConfigError( f"A client secret must be provided when using the {value} client auth method", @@ -152,16 +208,51 @@ class MSC3861: ) """The URL of the My Account page on the OIDC Provider as per MSC2965.""" - admin_token: Optional[str] = attr.ib( + _admin_token: Optional[str] = attr.ib( default=None, - validator=attr.validators.optional(attr.validators.instance_of(str)), + validator=[ + attr.validators.optional(attr.validators.instance_of(str)), + _check_admin_token, + ], ) """ A token that should be considered as an admin token. This is used by the OIDC provider, to make admin calls to Synapse. """ - def check_config_conflicts(self, root: RootConfig) -> None: + _admin_token_path: Optional[str] = attr.ib( + default=None, + validator=[ + attr.validators.optional(attr.validators.instance_of(str)), + _check_admin_token, + ], + ) + """ + Alternative to `admin_token`: allows the secret to be specified in an + external file. + """ + + def client_secret(self) -> Optional[str]: + """Returns the secret given via `client_secret` or `client_secret_path`.""" + if self._client_secret_path: + return read_secret_from_file_once( + self._client_secret_path, + ("experimental_features", "msc3861", "client_secret_path"), + ) + return self._client_secret + + def admin_token(self) -> Optional[str]: + """Returns the admin token given via `admin_token` or `admin_token_path`.""" + if self._admin_token_path: + return read_secret_from_file_once( + self._admin_token_path, + ("experimental_features", "msc3861", "admin_token_path"), + ) + return self._admin_token + + def check_config_conflicts( + self, root: RootConfig, allow_secrets_in_config: bool + ) -> None: """Checks for any configuration conflicts with other parts of Synapse. 
Raises: @@ -171,6 +262,24 @@ class MSC3861: if not self.enabled: return + if self._client_secret and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("experimental", "msc3861", "client_secret"), + ) + + if self.jwk and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("experimental", "msc3861", "jwk"), + ) + + if self._admin_token and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("experimental", "msc3861", "admin_token"), + ) + if ( root.auth.password_enabled_for_reauth or root.auth.password_enabled_for_login @@ -195,8 +304,6 @@ class MSC3861: if ( root.oidc.oidc_enabled - or root.saml2.saml2_enabled - or root.cas.cas_enabled or root.jwt.jwt_enabled ): raise ConfigError("SSO cannot be enabled when OAuth delegation is enabled") @@ -236,12 +343,6 @@ class MSC3861: ("session_lifetime",), ) - if root.registration.enable_3pid_changes: - raise ConfigError( - "enable_3pid_changes cannot be enabled when OAuth delegation is enabled", - ("enable_3pid_changes",), - ) - @attr.s(auto_attribs=True, frozen=True, slots=True) class MSC3866Config: @@ -261,7 +362,9 @@ class ExperimentalConfig(Config): section = "experimental" - def read_config(self, config: JsonDict, **kwargs: Any) -> None: + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: experimental = config.get("experimental_features") or {} # MSC3026 (busy presence state) @@ -288,9 +391,6 @@ class ExperimentalConfig(Config): ), ) - # MSC3244 (room version capabilities) - self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) - # MSC3266 (room summary api) self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False) @@ -338,8 +438,10 @@ class ExperimentalConfig(Config): # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3575 (Sliding Sync API endpoints) - self.msc3575_enabled: bool = experimental.get("msc3575_enabled", False) + # MSC3575 (Sliding Sync) alternate endpoints, c.f. MSC4186. + # + # This is enabled by default as a replacement for the sliding sync proxy. + self.msc3575_enabled: bool = experimental.get("msc3575_enabled", True) # MSC3773: Thread notifications self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False) @@ -363,11 +465,6 @@ class ExperimentalConfig(Config): # MSC3874: Filtering /messages with rel_types / not_rel_types. self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False) - # MSC3886: Simple client rendezvous capability - self.msc3886_endpoint: Optional[str] = experimental.get( - "msc3886_endpoint", None - ) - # MSC3890: Remotely silence local notifications # Note: This option requires "experimental_features.msc3391_enabled" to be # set to "true", in order to communicate account data deletions to clients. 
@@ -408,7 +505,9 @@ class ExperimentalConfig(Config): ) from exc # Check that none of the other config options conflict with MSC3861 when enabled - self.msc3861.check_config_conflicts(self.root) + self.msc3861.check_config_conflicts( + self.root, allow_secrets_in_config=allow_secrets_in_config + ) self.msc4028_push_encrypted_events = experimental.get( "msc4028_push_encrypted_events", False @@ -439,12 +538,23 @@ class ExperimentalConfig(Config): ("experimental", "msc4108_delegation_endpoint"), ) - self.msc3823_account_suspension = experimental.get( - "msc3823_account_suspension", False - ) + # MSC4133: Custom profile fields + self.msc4133_enabled: bool = experimental.get("msc4133_enabled", False) + + # MSC4210: Remove legacy mentions + self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False) + + # MSC4222: Adding `state_after` to sync v2 + self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False) - # MSC4151: Report room API (Client-Server API) - self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False) + # MSC4076: Add `disable_badge_count` to pusher configuration + self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False) + + # MSC4263: Preventing MXID enumeration via key queries + self.msc4263_limit_key_queries_to_users_who_share_rooms = experimental.get( + "msc4263_limit_key_queries_to_users_who_share_rooms", + False, + ) - # MSC4156: Migrate server_name to via - self.msc4156_enabled: bool = experimental.get("msc4156_enabled", False) + # MSC4155: Invite filtering + self.msc4155_enabled: bool = experimental.get("msc4155_enabled", False) diff --git a/synapse/config/federation.py b/synapse/config/federation.py
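read_secret_from_file_once (defined near the top of this file's hunk) leans on functools.cache so each *_path secret is read from disk at most once per process; a small standalone sketch of that memoization, with a made-up path:

    from functools import cache

    @cache
    def read_secret_once(path: str) -> str:
        print("reading", path)  # printed only on the first call per path
        with open(path) as f:
            return f.read().strip()

    # read_secret_once("/run/secrets/msc3861_client_secret")  # hypothetical path
    # read_secret_once("/run/secrets/msc3861_client_secret")  # served from cache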
index cf29fa2562..31f46e420d 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py
@@ -94,5 +94,21 @@ class FederationConfig(Config): 2**62, ) + def is_domain_allowed_according_to_federation_whitelist(self, domain: str) -> bool: + """ + Returns whether a domain is allowed according to the federation whitelist. If a + federation whitelist is not set, all domains are allowed. + + Args: + domain: The domain to test. + + Returns: + True if the domain is allowed or if a whitelist is not set, False otherwise. + """ + if self.federation_domain_whitelist is None: + return True + + return domain in self.federation_domain_whitelist + _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
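A standalone stand-in mirroring the new helper's logic (Cfg below is an invented class, not FederationConfig itself): with no whitelist configured every domain passes, otherwise only listed domains do.

    from typing import Optional, Set

    class Cfg:
        federation_domain_whitelist: Optional[Set[str]] = None

        def is_domain_allowed_according_to_federation_whitelist(self, domain: str) -> bool:
            if self.federation_domain_whitelist is None:
                return True
            return domain in self.federation_domain_whitelist

    cfg = Cfg()
    assert cfg.is_domain_allowed_according_to_federation_whitelist("example.com")
    cfg.federation_domain_whitelist = {"matrix.org"}
    assert cfg.is_domain_allowed_according_to_federation_whitelist("matrix.org")
    assert not cfg.is_domain_allowed_according_to_federation_whitelist("example.com")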
index e36c0bd6ae..969261cb11 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py
@@ -27,10 +27,8 @@ from .auto_accept_invites import AutoAcceptInvitesConfig from .background_updates import BackgroundUpdateConfig from .cache import CacheConfig from .captcha import CaptchaConfig -from .cas import CasConfig from .consent import ConsentConfig from .database import DatabaseConfig -from .emailconfig import EmailConfig from .experimental import ExperimentalConfig from .federation import FederationConfig from .jwt import JWTConfig @@ -49,7 +47,6 @@ from .repository import ContentRepositoryConfig from .retention import RetentionConfig from .room import RoomConfig from .room_directory import RoomDirectoryConfig -from .saml2 import SAML2Config from .server import ServerConfig from .server_notices import ServerNoticesConfig from .spam_checker import SpamCheckerConfig @@ -59,6 +56,7 @@ from .third_party_event_rules import ThirdPartyRulesConfig from .tls import TlsConfig from .tracer import TracerConfig from .user_directory import UserDirectoryConfig +from .user_types import UserTypesConfig from .voip import VoipConfig from .workers import WorkerConfig @@ -84,13 +82,10 @@ class HomeServerConfig(RootConfig): ApiConfig, AppServiceConfig, KeyConfig, - SAML2Config, OIDCConfig, - CasConfig, SSOConfig, JWTConfig, AuthConfig, - EmailConfig, PasswordAuthProviderConfig, PushConfig, SpamCheckerConfig, @@ -107,4 +102,5 @@ class HomeServerConfig(RootConfig): ExperimentalConfig, BackgroundUpdateConfig, AutoAcceptInvitesConfig, + UserTypesConfig, ] diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py
index b41f2dc08f..5c76551f33 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py
@@ -38,6 +38,7 @@ class JWTConfig(Config): self.jwt_algorithm = jwt_config["algorithm"] self.jwt_subject_claim = jwt_config.get("subject_claim", "sub") + self.jwt_display_name_claim = jwt_config.get("display_name_claim") # The issuer and audiences are optional, if provided, it is asserted # that the claims exist on the JWT. @@ -49,5 +50,6 @@ class JWTConfig(Config): self.jwt_secret = None self.jwt_algorithm = None self.jwt_subject_claim = None + self.jwt_display_name_claim = None self.jwt_issuer = None self.jwt_audiences = None diff --git a/synapse/config/key.py b/synapse/config/key.py
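For illustration, a hypothetical JWT payload for a deployment that sets display_name_claim to "name"; the localpart still comes from subject_claim (default "sub"), and the display name would be taken from the configured claim. All values here are invented:

    payload = {
        "sub": "alice",                       # subject_claim (default)
        "name": "Alice Example",              # read only if display_name_claim == "name"
        "iss": "https://issuer.example.com",  # hypothetical issuer
    }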
index b9925a52d2..29c558448b 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py
@@ -43,7 +43,7 @@ from unpaddedbase64 import decode_base64 from synapse.types import JsonDict from synapse.util.stringutils import random_string, random_string_with_symbols -from ._base import Config, ConfigError +from ._base import Config, ConfigError, read_file if TYPE_CHECKING: from signedjson.key import VerifyKeyWithExpiry @@ -91,6 +91,16 @@ To suppress this warning and continue using 'matrix.org', admins should set 'suppress_key_server_warning' to 'true' in homeserver.yaml. --------------------------------------------------------------------------------""" +CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR = """\ +Conflicting options 'macaroon_secret_key' and 'macaroon_secret_key_path' are +both defined in config file. +""" + +CONFLICTING_FORM_SECRET_OPTS_ERROR = """\ +Conflicting options 'form_secret' and 'form_secret_path' are both defined in +config file. +""" + logger = logging.getLogger(__name__) @@ -107,7 +117,11 @@ class KeyConfig(Config): section = "key" def read_config( - self, config: JsonDict, config_dir_path: str, **kwargs: Any + self, + config: JsonDict, + config_dir_path: str, + allow_secrets_in_config: bool, + **kwargs: Any, ) -> None: # the signing key can be specified inline or in a separate file if "signing_key" in config: @@ -166,10 +180,21 @@ class KeyConfig(Config): ) ) - macaroon_secret_key: Optional[str] = config.get( - "macaroon_secret_key", self.root.registration.registration_shared_secret - ) - + macaroon_secret_key = config.get("macaroon_secret_key") + if macaroon_secret_key and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("macaroon_secret_key",), + ) + macaroon_secret_key_path = config.get("macaroon_secret_key_path") + if macaroon_secret_key_path: + if macaroon_secret_key: + raise ConfigError(CONFLICTING_MACAROON_SECRET_KEY_OPTS_ERROR) + macaroon_secret_key = read_file( + macaroon_secret_key_path, ("macaroon_secret_key_path",) + ).strip() + if not macaroon_secret_key: + macaroon_secret_key = self.root.registration.registration_shared_secret if not macaroon_secret_key: # Unfortunately, there are people out there that don't have this # set. Lets just be "nice" and derive one from their secret key. @@ -181,7 +206,21 @@ class KeyConfig(Config): # a secret which is used to calculate HMACs for form values, to stop # falsification of values - self.form_secret = config.get("form_secret", None) + form_secret = config.get("form_secret", None) + if form_secret and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("form_secret",), + ) + form_secret_path = config.get("form_secret_path", None) + if form_secret_path: + if form_secret: + raise ConfigError(CONFLICTING_FORM_SECRET_OPTS_ERROR) + self.form_secret = read_file( + form_secret_path, ("form_secret_path",) + ).strip() + else: + self.form_secret = form_secret def generate_config_section( self, @@ -200,16 +239,13 @@ class KeyConfig(Config): ) form_secret = 'form_secret: "%s"' % random_string_with_symbols(50) - return ( - """\ + return """\ %(macaroon_secret_key)s %(form_secret)s signing_key_path: "%(base_key_name)s.signing.key" trusted_key_servers: - server_name: "matrix.org" - """ - % locals() - ) + """ % locals() def read_signing_keys(self, signing_key_path: str, name: str) -> List[SigningKey]: """Read the signing keys in the given path. 
@@ -249,7 +285,9 @@ class KeyConfig(Config): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) - verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes(key_id, key_bytes) # type: ignore[assignment] + verify_key: "VerifyKeyWithExpiry" = decode_verify_key_bytes( + key_id, key_bytes + ) # type: ignore[assignment] verify_key.expired = key_data["expired_ts"] keys[key_id] = verify_key else: diff --git a/synapse/config/logger.py b/synapse/config/logger.py
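Putting the key.py hunks together, the macaroon secret now resolves in this order (a sketch; the file paths below are invented):

    # 1. macaroon_secret_key          -- in-line; rejected under --no-secrets-in-config
    # 2. macaroon_secret_key_path     -- read_file(...).strip(); setting both 1 and 2 is a ConfigError
    # 3. registration_shared_secret   -- legacy fallback
    # 4. derived from the signing key -- last-resort behaviour kept from before
    config = {
        "macaroon_secret_key_path": "/run/secrets/macaroon",  # hypothetical path
        "form_secret_path": "/run/secrets/form",              # hypothetical path
    }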
index fca0b08d6d..110ff75c63 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py
@@ -132,24 +132,11 @@ disable_existing_loggers: false """ ) -LOG_FILE_ERROR = """\ -Support for the log_file configuration option and --log-file command-line option was -removed in Synapse 1.3.0. You should instead set up a separate log configuration file. -""" - -STRUCTURED_ERROR = """\ -Support for the structured configuration option was removed in Synapse 1.54.0. -You should instead use the standard logging configuration. See -https://element-hq.github.io/synapse/v1.54/structured_logging.html -""" - class LoggingConfig(Config): section = "logging" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - if config.get("log_file"): - raise ConfigError(LOG_FILE_ERROR) self.log_config = self.abspath(config.get("log_config")) self.no_redirect_stdio = config.get("no_redirect_stdio", False) @@ -157,18 +144,13 @@ class LoggingConfig(Config): self, config_dir_path: str, server_name: str, **kwargs: Any ) -> str: log_config = os.path.join(config_dir_path, server_name + ".log.config") - return ( - """\ + return """\ log_config: "%(log_config)s" - """ - % locals() - ) + """ % locals() def read_arguments(self, args: argparse.Namespace) -> None: if args.no_redirect_stdio is not None: self.no_redirect_stdio = args.no_redirect_stdio - if args.log_file is not None: - raise ConfigError(LOG_FILE_ERROR) @staticmethod def add_arguments(parser: argparse.ArgumentParser) -> None: @@ -296,10 +278,6 @@ def _load_logging_config(log_config_path: str) -> None: if not log_config: logging.warning("Loaded a blank logging config?") - # If the old structured logging configuration is being used, raise an error. - if "structured" in log_config and log_config.get("structured"): - raise ConfigError(STRUCTURED_ERROR) - logging.config.dictConfig(log_config) # Blow away the pyo3-log cache so that it reloads the configuration. @@ -363,5 +341,6 @@ def setup_logging( "Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse" ) logging.info("Server hostname: %s", config.server.server_name) + logging.info("Public Base URL: %s", config.server.public_baseurl) logging.info("Instance name: %s", hs.get_instance_name()) logging.info("Twisted reactor: %s", type(reactor).__name__) diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py
index d0a03baf55..b18654ff6a 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py
@@ -125,6 +125,10 @@ OIDC_PROVIDER_CONFIG_SCHEMA = { "enum": ["client_secret_basic", "client_secret_post", "none"], }, "pkce_method": {"type": "string", "enum": ["auto", "always", "never"]}, + "id_token_signing_alg_values_supported": { + "type": "array", + "items": {"type": "string"}, + }, "scopes": {"type": "array", "items": {"type": "string"}}, "authorization_endpoint": {"type": "string"}, "token_endpoint": {"type": "string"}, @@ -137,6 +141,9 @@ OIDC_PROVIDER_CONFIG_SCHEMA = { "type": "string", "enum": ["auto", "userinfo_endpoint"], }, + "redirect_uri": { + "type": ["string", "null"], + }, "allow_existing_users": {"type": "boolean"}, "user_mapping_provider": {"type": ["object", "null"]}, "attribute_requirements": { @@ -248,7 +255,7 @@ def _parse_oidc_config_dict( idp_id = oidc_config.get("idp_id", "oidc") # prefix the given IDP with a prefix specific to the SSO mechanism, to avoid - # clashes with other mechs (such as SAML, CAS). + # clashes with other mechs. # # We allow "oidc" as an exception so that people migrating from old-style # "oidc_config" format (which has long used "oidc" as its idp_id) can migrate to @@ -326,6 +333,9 @@ def _parse_oidc_config_dict( client_secret_jwt_key=client_secret_jwt_key, client_auth_method=client_auth_method, pkce_method=oidc_config.get("pkce_method", "auto"), + id_token_signing_alg_values_supported=oidc_config.get( + "id_token_signing_alg_values_supported" + ), scopes=oidc_config.get("scopes", ["openid"]), authorization_endpoint=oidc_config.get("authorization_endpoint"), token_endpoint=oidc_config.get("token_endpoint"), @@ -337,6 +347,7 @@ def _parse_oidc_config_dict( ), skip_verification=oidc_config.get("skip_verification", False), user_profile_method=oidc_config.get("user_profile_method", "auto"), + redirect_uri=oidc_config.get("redirect_uri"), allow_existing_users=oidc_config.get("allow_existing_users", False), user_mapping_provider_class=user_mapping_provider_class, user_mapping_provider_config=user_mapping_provider_config, @@ -345,6 +356,9 @@ def _parse_oidc_config_dict( additional_authorization_parameters=oidc_config.get( "additional_authorization_parameters", {} ), + passthrough_authorization_parameters=oidc_config.get( + "passthrough_authorization_parameters", [] + ), ) @@ -402,6 +416,34 @@ class OidcProviderConfig: # Valid values are 'auto', 'always', and 'never'. pkce_method: str + id_token_signing_alg_values_supported: Optional[List[str]] + """ + List of the JWS signing algorithms (`alg` values) that are supported for signing the + `id_token`. + + This is *not* required if `discovery` is disabled. We default to supporting `RS256` + in the downstream usage if no algorithms are configured here or in the discovery + document. + + According to the spec, the algorithm `"RS256"` MUST be included. The absolutely rigid + approach would be to reject this provider as non-compliant if it's not included, but + we can just allow whatever is configured and see what happens (they're the ones that + configured the value and are cooperating with the identity provider). It wouldn't be + wise to add it ourselves because absence of `RS256` might indicate that the provider + actually doesn't support it, despite the spec requirement. Adding it silently could + lead to failed authentication attempts or strange mismatch attacks. + + The `alg` value `"none"` MAY be supported but can only be used if the Authorization + Endpoint does not include `id_token` in the `response_type` (e.g. 
+ `/authorize?response_type=code` where `none` can apply, + `/authorize?response_type=code%20id_token` where `none` can't apply) (such as when + using the Authorization Code Flow). + + Spec: + - https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata + - https://openid.net/specs/openid-connect-core-1_0.html#AuthorizationExamples + """ + # list of scopes to request scopes: Collection[str] @@ -432,6 +474,18 @@ class OidcProviderConfig: # values are: "auto" or "userinfo_endpoint". user_profile_method: str + redirect_uri: Optional[str] + """ + An optional replacement for Synapse's hardcoded `redirect_uri` URL + (`<public_baseurl>/_synapse/client/oidc/callback`). This can be used to send + the client to a different URL after it receives a response from the + `authorization_endpoint`. + + If this is set, the client is expected to call Synapse's OIDC callback URL + reproduced above itself with the necessary parameters and session cookie, in + order to complete OIDC login. + """ + # whether to allow a user logging in via OIDC to match a pre-existing account # instead of failing allow_existing_users: bool @@ -450,3 +504,6 @@ class OidcProviderConfig: # Additional parameters that will be passed to the authorization grant URL additional_authorization_parameters: Mapping[str, str] + + # Allow query parameters to the redirect endpoint that will be passed to the authorization grant URL + passthrough_authorization_parameters: Collection[str] diff --git a/synapse/config/push.py b/synapse/config/push.py
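A hypothetical provider entry (shown as the parsed dict rather than YAML) exercising the three new options; the remaining keys follow the existing oidc_providers format and all values here are invented:

    oidc_provider = {
        "idp_id": "example",
        "issuer": "https://issuer.example.com",
        "client_id": "synapse",
        "id_token_signing_alg_values_supported": ["RS256", "ES256"],
        "redirect_uri": "https://app.example.com/oidc/return",
        "passthrough_authorization_parameters": ["login_hint"],
    }

With redirect_uri set, the client itself is expected to call Synapse's /_synapse/client/oidc/callback with the returned parameters and session cookie, as the docstring above notes.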
index bc24833702..5ca624e69e 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py
@@ -37,26 +37,12 @@ class PushConfig(Config): "group_unread_count_by_room", True ) - # There was a a 'redact_content' setting but mistakenly read from the - # 'email'section'. Check for the flag in the 'push' section, and log, - # but do not honour it to avoid nasty surprises when people upgrade. if push_config.get("redact_content") is not None: print( "The push.redact_content content option has never worked. " "Please set push.include_content if you want this behaviour" ) - # Now check for the one in the 'email' section and honour it, - # with a warning. - email_push_config = config.get("email") or {} - redact_content = email_push_config.get("redact_content") - if redact_content is not None: - print( - "The 'email.redact_content' option is deprecated: " - "please set push.include_content instead" - ) - self.push_include_content = not redact_content - # Whether to apply a random delay to outbound push. self.push_jitter_delay_ms = None push_jitter_delay = push_config.get("jitter_delay", None) diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 3fa33f5373..eb1dc2dacb 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py
@@ -228,3 +228,15 @@ class RatelimitConfig(Config): config.get("remote_media_download_burst_count", "500M") ), ) + + self.rc_presence_per_user = RatelimitSettings.parse( + config, + "rc_presence.per_user", + defaults={"per_second": 0.1, "burst_count": 1}, + ) + + self.rc_delayed_event_mgmt = RatelimitSettings.parse( + config, + "rc_delayed_event_mgmt", + defaults={"per_second": 1, "burst_count": 5}, + ) diff --git a/synapse/config/redis.py b/synapse/config/redis.py
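The two new ratelimit buckets and their defaults, shown as the parsed config dict an operator would override (the keys are the standard per_second / burst_count pair):

    config = {
        "rc_presence": {
            "per_user": {"per_second": 0.1, "burst_count": 1},  # defaults
        },
        "rc_delayed_event_mgmt": {"per_second": 1, "burst_count": 5},  # defaults
    }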
index f140538088..948c95eef7 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py
@@ -21,15 +21,22 @@ from typing import Any -from synapse.config._base import Config +from synapse.config._base import Config, ConfigError, read_file from synapse.types import JsonDict from synapse.util.check_dependencies import check_requirements +CONFLICTING_PASSWORD_OPTS_ERROR = """\ +You have configured both `redis.password` and `redis.password_path`. +These are mutually incompatible. +""" + class RedisConfig(Config): section = "redis" - def read_config(self, config: JsonDict, **kwargs: Any) -> None: + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: redis_config = config.get("redis") or {} self.redis_enabled = redis_config.get("enabled", False) @@ -43,6 +50,22 @@ class RedisConfig(Config): self.redis_path = redis_config.get("path", None) self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") + if self.redis_password and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("redis", "password"), + ) + redis_password_path = redis_config.get("password_path") + if redis_password_path: + if self.redis_password: + raise ConfigError(CONFLICTING_PASSWORD_OPTS_ERROR) + self.redis_password = read_file( + redis_password_path, + ( + "redis", + "password_path", + ), + ).strip() self.redis_use_tls = redis_config.get("use_tls", False) self.redis_certificate = redis_config.get("certificate_file", None) diff --git a/synapse/config/registration.py b/synapse/config/registration.py
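And the matching operator-side change for Redis, shown as the parsed dict that RedisConfig.read_config receives; moving the password into a file keeps the config compatible with --no-secrets-in-config:

    config = {
        "redis": {
            "enabled": True,
            "password_path": "/run/secrets/redis_password",  # hypothetical path
        }
    }
    # Supplying both redis.password and redis.password_path raises
    # ConfigError(CONFLICTING_PASSWORD_OPTS_ERROR).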
index c7f3e6d35e..b3fa500d4e 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py
@@ -27,13 +27,6 @@ from synapse.config._base import Config, ConfigError, read_file from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool -NO_EMAIL_DELEGATE_ERROR = """\ -Delegation of email verification to an identity server is no longer supported. To -continue to allow users to add email addresses to their accounts, and use them for -password resets, configure Synapse with an SMTP server via the `email` setting, and -remove `account_threepid_delegates.email`. -""" - CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ You have configured both `registration_shared_secret` and `registration_shared_secret_path`. These are mutually incompatible. @@ -43,7 +36,9 @@ You have configured both `registration_shared_secret` and class RegistrationConfig(Config): section = "registration" - def read_config(self, config: JsonDict, **kwargs: Any) -> None: + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: self.enable_registration = strtobool( str(config.get("enable_registration", False)) ) @@ -56,18 +51,17 @@ class RegistrationConfig(Config): str(config.get("enable_registration_without_verification", False)) ) - self.registrations_require_3pid = config.get("registrations_require_3pid", []) - self.allowed_local_3pids = config.get("allowed_local_3pids", []) - self.enable_3pid_lookup = config.get("enable_3pid_lookup", True) self.registration_requires_token = config.get( "registration_requires_token", False ) - self.enable_registration_token_3pid_bypass = config.get( - "enable_registration_token_3pid_bypass", False - ) # read the shared secret, either inline or from an external file self.registration_shared_secret = config.get("registration_shared_secret") + if self.registration_shared_secret and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("registration_shared_secret",), + ) registration_shared_secret_path = config.get("registration_shared_secret_path") if registration_shared_secret_path: if self.registration_shared_secret: @@ -78,16 +72,8 @@ class RegistrationConfig(Config): self.bcrypt_rounds = config.get("bcrypt_rounds", 12) - account_threepid_delegates = config.get("account_threepid_delegates") or {} - if "email" in account_threepid_delegates: - raise ConfigError(NO_EMAIL_DELEGATE_ERROR) - self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") - self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) - if config.get("invite_3pid_guest", False): - raise ConfigError("invite_3pid_guest is no longer supported") - self.auto_join_rooms = config.get("auto_join_rooms", []) for room_alias in self.auto_join_rooms: if not RoomAlias.is_valid(room_alias): @@ -147,12 +133,9 @@ class RegistrationConfig(Config): .get("msc3861", {}) .get("enabled", False) ) - self.enable_3pid_changes = config.get( - "enable_3pid_changes", not msc3861_enabled - ) - self.disable_msisdn_registration = config.get( - "disable_msisdn_registration", False + self.allow_underscore_prefixed_localpart = config.get( + "allow_underscore_prefixed_localpart", False ) session_lifetime = config.get("session_lifetime") diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 97ce6de528..fc5a90c85a 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py
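The content-repository hunk below flips `enable_authenticated_media` to default to `True`, meaning newly uploaded media is served only via the authenticated endpoints. A hedged sketch of fetching media that way, using the client endpoint stabilised in Matrix 1.11; the homeserver URL and token are placeholders:

```python
import urllib.request


def download_authenticated_media(
    homeserver: str, access_token: str, server_name: str, media_id: str
) -> bytes:
    # Authenticated media download (Matrix 1.11); unlike the legacy
    # /_matrix/media/v3/download endpoint, it requires a Bearer token.
    url = f"{homeserver}/_matrix/client/v1/media/download/{server_name}/{media_id}"
    request = urllib.request.Request(
        url, headers={"Authorization": f"Bearer {access_token}"}
    )
    with urllib.request.urlopen(request) as response:
        return response.read()
```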
@@ -22,7 +22,7 @@ import logging import os from typing import Any, Dict, List, Tuple -from urllib.request import getproxies_environment # type: ignore +from urllib.request import getproxies_environment import attr @@ -272,9 +272,7 @@ class ContentRepositoryConfig(Config): remote_media_lifetime ) - self.enable_authenticated_media = config.get( - "enable_authenticated_media", False - ) + self.enable_authenticated_media = config.get("enable_authenticated_media", True) def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str: assert data_dir_path is not None diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 704895cf9a..f0349b68f2 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py
@@ -54,9 +54,7 @@ class RoomDirectoryConfig(Config): for rule in room_list_publication_rules ] else: - self._room_list_publication_rules = [ - _RoomDirectoryRule("room_list_publication_rules", {"action": "allow"}) - ] + self._room_list_publication_rules = [] def is_alias_creation_allowed(self, user_id: str, room_id: str, alias: str) -> bool: """Checks if the given user is allowed to create the given alias diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py deleted file mode 100644
index 9d7ef94507..0000000000 --- a/synapse/config/saml2.py +++ /dev/null
@@ -1,248 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2019-2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import logging -from typing import Any, List, Set - -from synapse.config.sso import SsoAttributeRequirement -from synapse.types import JsonDict -from synapse.util.check_dependencies import check_requirements -from synapse.util.module_loader import load_module, load_python_module - -from ._base import Config, ConfigError -from ._util import validate_config - -logger = logging.getLogger(__name__) - -DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.saml.DefaultSamlMappingProvider" -# The module that DefaultSamlMappingProvider is in was renamed, we want to -# transparently handle both the same. -LEGACY_USER_MAPPING_PROVIDER = ( - "synapse.handlers.saml_handler.DefaultSamlMappingProvider" -) - - -def _dict_merge(merge_dict: dict, into_dict: dict) -> None: - """Do a deep merge of two dicts - - Recursively merges `merge_dict` into `into_dict`: - * For keys where both `merge_dict` and `into_dict` have a dict value, the values - are recursively merged - * For all other keys, the values in `into_dict` (if any) are overwritten with - the value from `merge_dict`. 
- - Args: - merge_dict: dict to merge - into_dict: target dict to be modified - """ - for k, v in merge_dict.items(): - if k not in into_dict: - into_dict[k] = v - continue - - current_val = into_dict[k] - - if isinstance(v, dict) and isinstance(current_val, dict): - _dict_merge(v, current_val) - continue - - # otherwise we just overwrite - into_dict[k] = v - - -class SAML2Config(Config): - section = "saml2" - - def read_config(self, config: JsonDict, **kwargs: Any) -> None: - self.saml2_enabled = False - - saml2_config = config.get("saml2_config") - - if not saml2_config or not saml2_config.get("enabled", True): - return - - if not saml2_config.get("sp_config") and not saml2_config.get("config_path"): - return - - check_requirements("saml2") - - self.saml2_enabled = True - - attribute_requirements = saml2_config.get("attribute_requirements") or [] - self.attribute_requirements = _parse_attribute_requirements_def( - attribute_requirements - ) - - self.saml2_grandfathered_mxid_source_attribute = saml2_config.get( - "grandfathered_mxid_source_attribute", "uid" - ) - - # refers to a SAML IdP entity ID - self.saml2_idp_entityid = saml2_config.get("idp_entityid", None) - - # IdP properties for Matrix clients - self.idp_name = saml2_config.get("idp_name", "SAML") - self.idp_icon = saml2_config.get("idp_icon") - self.idp_brand = saml2_config.get("idp_brand") - - # user_mapping_provider may be None if the key is present but has no value - ump_dict = saml2_config.get("user_mapping_provider") or {} - - # Use the default user mapping provider if not set - ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) - if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER: - ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER - - # Ensure a config is present - ump_dict["config"] = ump_dict.get("config") or {} - - if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER: - # Load deprecated options for use by the default module - old_mxid_source_attribute = saml2_config.get("mxid_source_attribute") - if old_mxid_source_attribute: - logger.warning( - "The config option saml2_config.mxid_source_attribute is deprecated. " - "Please use saml2_config.user_mapping_provider.config" - ".mxid_source_attribute instead." - ) - ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute - - old_mxid_mapping = saml2_config.get("mxid_mapping") - if old_mxid_mapping: - logger.warning( - "The config option saml2_config.mxid_mapping is deprecated. Please " - "use saml2_config.user_mapping_provider.config.mxid_mapping instead." - ) - ump_dict["config"]["mxid_mapping"] = old_mxid_mapping - - # Retrieve an instance of the module's class - # Pass the config dictionary to the module for processing - ( - self.saml2_user_mapping_provider_class, - self.saml2_user_mapping_provider_config, - ) = load_module(ump_dict, ("saml2_config", "user_mapping_provider")) - - # Ensure loaded user mapping module has defined all necessary methods - # Note parse_config() is already checked during the call to load_module - required_methods = [ - "get_saml_attributes", - "saml_response_to_user_attributes", - "get_remote_user_id", - ] - missing_methods = [ - method - for method in required_methods - if not hasattr(self.saml2_user_mapping_provider_class, method) - ] - if missing_methods: - raise ConfigError( - "Class specified by saml2_config." 
- "user_mapping_provider.module is missing required " - "methods: %s" % (", ".join(missing_methods),) - ) - - # Get the desired saml auth response attributes from the module - saml2_config_dict = self._default_saml_config_dict( - *self.saml2_user_mapping_provider_class.get_saml_attributes( - self.saml2_user_mapping_provider_config - ) - ) - _dict_merge( - merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict - ) - - config_path = saml2_config.get("config_path", None) - if config_path is not None: - mod = load_python_module(config_path) - config_dict_from_file = getattr(mod, "CONFIG", None) - if config_dict_from_file is None: - raise ConfigError( - "Config path specified by saml2_config.config_path does not " - "have a CONFIG property." - ) - _dict_merge(merge_dict=config_dict_from_file, into_dict=saml2_config_dict) - - import saml2.config - - self.saml2_sp_config = saml2.config.SPConfig() - self.saml2_sp_config.load(saml2_config_dict) - - # session lifetime: in milliseconds - self.saml2_session_lifetime = self.parse_duration( - saml2_config.get("saml_session_lifetime", "15m") - ) - - def _default_saml_config_dict( - self, required_attributes: Set[str], optional_attributes: Set[str] - ) -> JsonDict: - """Generate a configuration dictionary with required and optional attributes that - will be needed to process new user registration - - Args: - required_attributes: SAML auth response attributes that are - necessary to function - optional_attributes: SAML auth response attributes that can be used to add - additional information to Synapse user accounts, but are not required - - Returns: - A SAML configuration dictionary - """ - import saml2 - - if self.saml2_grandfathered_mxid_source_attribute: - optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute) - optional_attributes -= required_attributes - - public_baseurl = self.root.server.public_baseurl - metadata_url = public_baseurl + "_synapse/client/saml2/metadata.xml" - response_url = public_baseurl + "_synapse/client/saml2/authn_response" - return { - "entityid": metadata_url, - "service": { - "sp": { - "endpoints": { - "assertion_consumer_service": [ - (response_url, saml2.BINDING_HTTP_POST) - ] - }, - "required_attributes": list(required_attributes), - "optional_attributes": list(optional_attributes), - # "name_id_format": saml2.saml.NAMEID_FORMAT_PERSISTENT, - } - }, - } - - -ATTRIBUTE_REQUIREMENTS_SCHEMA = { - "type": "array", - "items": SsoAttributeRequirement.JSON_SCHEMA, -} - - -def _parse_attribute_requirements_def( - attribute_requirements: Any, -) -> List[SsoAttributeRequirement]: - validate_config( - ATTRIBUTE_REQUIREMENTS_SCHEMA, - attribute_requirements, - config_path=("saml2_config", "attribute_requirements"), - ) - return [SsoAttributeRequirement(**x) for x in attribute_requirements] diff --git a/synapse/config/server.py b/synapse/config/server.py
index fd52c0475c..0844475b15 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2014-2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -43,12 +43,6 @@ from ._util import validate_config logger = logging.Logger(__name__) -DIRECT_TCP_ERROR = """ -Using direct TCP replication for workers is no longer supported. - -Please see https://element-hq.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis -""" - # by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes # (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen # on IPv6 when '::' is set. @@ -166,13 +160,6 @@ ROOM_COMPLEXITY_TOO_GREAT = ( "to join this room." ) -METRICS_PORT_WARNING = """\ -The metrics_port configuration option is deprecated in Synapse 0.31 in favour of -a listener. Please see -https://element-hq.github.io/synapse/latest/metrics-howto.html -on how to configure the new listener. ---------------------------------------------------------------------------------""" - KNOWN_LISTENER_TYPES = { "http", @@ -215,9 +202,6 @@ class HttpListenerConfig: additional_resources: Dict[str, dict] = attr.Factory(dict) tag: Optional[str] = None request_id_header: Optional[str] = None - # If true, the listener will return CORS response headers compatible with MSC3886: - # https://github.com/matrix-org/matrix-spec-proposals/pull/3886 - experimental_cors_msc3886: bool = False @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -335,8 +319,14 @@ class ServerConfig(Config): logger.info("Using default public_baseurl %s", public_baseurl) else: self.serve_client_wellknown = True + + # Check that the user-provided value is a string before using it + if not isinstance(public_baseurl, str): + raise ConfigError("Must be a string", ("public_baseurl",)) + + # Ensure that public_baseurl ends with a trailing slash if public_baseurl[-1] != "/": public_baseurl += "/" self.public_baseurl = public_baseurl # check that public_baseurl is valid @@ -367,11 +357,6 @@ class ServerConfig(Config): "m.homeserver is not supported in extra_well_known_content, " "use public_baseurl in base config instead." ) - if "m.identity_server" in self.extra_well_known_client_content: - raise ConfigError( - "m.identity_server is not supported in extra_well_known_content, " - "use default_identity_server in base config instead." - ) # Whether to enable user presence.
presence_config = config.get("presence") or {} @@ -479,10 +464,6 @@ class ServerConfig(Config): self.max_mau_value = config.get("max_mau_value", 0) self.mau_stats_only = config.get("mau_stats_only", False) - self.mau_limits_reserved_threepids = config.get( - "mau_limit_reserved_threepids", [] - ) - self.mau_trial_days = config.get("mau_trial_days", 0) self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {}) self.mau_limit_alerting = config.get("mau_limit_alerting", True) @@ -700,21 +681,6 @@ class ServerConfig(Config): pub_key=manhole_pub_key, ) - metrics_port = config.get("metrics_port") - if metrics_port: - logger.warning(METRICS_PORT_WARNING) - - self.listeners.append( - TCPListenerConfig( - port=metrics_port, - bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")], - type="http", - http_options=HttpListenerConfig( - resources=[HttpResourceConfig(names=["metrics"])] - ), - ) - ) - self.cleanup_extremities_with_dummy_events = config.get( "cleanup_extremities_with_dummy_events", True ) @@ -724,18 +690,6 @@ class ServerConfig(Config): self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False) - # Inhibits the /requestToken endpoints from returning an error that might leak - # information about whether an e-mail address is in use or not on this - # homeserver, and instead return a 200 with a fake sid if this kind of error is - # met, without sending anything. - # This is a compromise between sending an email, which could be a spam vector, - # and letting the client know which email address is bound to an account and - # which one isn't. - self.request_token_inhibit_3pid_errors = config.get( - "request_token_inhibit_3pid_errors", - False, - ) - # Whitelist of domain names that given next_link parameters must have next_link_domain_whitelist: Optional[List[str]] = config.get( "next_link_domain_whitelist" @@ -780,6 +734,17 @@ class ServerConfig(Config): else: self.delete_stale_devices_after = None + # The maximum allowed delay duration for delayed events (MSC4140). 
+ max_event_delay_duration = config.get("max_event_delay_duration") + if max_event_delay_duration is not None: + self.max_event_delay_ms: Optional[int] = self.parse_duration( + max_event_delay_duration + ) + if self.max_event_delay_ms <= 0: + raise ConfigError("max_event_delay_duration must be a positive value") + else: + self.max_event_delay_ms = None + def has_tls_listener(self) -> bool: return any(listener.is_tls() for listener in self.listeners) @@ -828,13 +793,10 @@ class ServerConfig(Config): ).lstrip() if not unsecure_listeners: - unsecure_http_bindings = ( - """- port: %(unsecure_port)s + unsecure_http_bindings = """- port: %(unsecure_port)s tls: false type: http - x_forwarded: true""" - % locals() - ) + x_forwarded: true""" % locals() if not open_private_ports: unsecure_http_bindings += ( @@ -853,16 +815,13 @@ class ServerConfig(Config): if not secure_listeners: secure_http_bindings = "" - return ( - """\ + return """\ server_name: "%(server_name)s" pid_file: %(pid_file)s listeners: %(secure_http_bindings)s %(unsecure_http_bindings)s - """ - % locals() - ) + """ % locals() def read_arguments(self, args: argparse.Namespace) -> None: if args.manhole is not None: @@ -915,24 +874,6 @@ class ServerConfig(Config): ) -def is_threepid_reserved( - reserved_threepids: List[JsonDict], threepid: JsonDict -) -> bool: - """Check the threepid against the reserved threepid config - Args: - reserved_threepids: List of reserved threepids - threepid: The threepid to test for - - Returns: - Is the threepid undertest reserved_user - """ - - for tp in reserved_threepids: - if threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]: - return True - return False - - def read_gc_thresholds( thresholds: Optional[List[Any]], ) -> Optional[Tuple[int, int, int]]: @@ -956,9 +897,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: raise ConfigError("Expected a dictionary", ("listeners", str(num))) listener_type = listener["type"] - # Raise a helpful error if direct TCP replication is still configured. - if listener_type == "replication": - raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type")) port = listener.get("port") socket_path = listener.get("path") @@ -999,7 +937,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: additional_resources=listener.get("additional_resources", {}), tag=listener.get("tag"), request_id_header=listener.get("request_id_header"), - experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False), ) if socket_path: diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index d7a2187e7d..cf27a7ee13 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py
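The SSO hunk below extends `SsoAttributeRequirement` so that `value` and `one_of` are both optional: with neither, the attribute merely has to exist, and `one_of` accepts any of several values. A standalone sketch of those semantics (the helper and the list-valued attribute map are illustrative assumptions; Synapse's actual enforcement lives in the SSO handler):

```python
from typing import List, Mapping, Optional


def requirement_met(
    attributes: Mapping[str, List[str]],
    attribute: str,
    value: Optional[str] = None,
    one_of: Optional[List[str]] = None,
) -> bool:
    values = attributes.get(attribute)
    if values is None:
        return False  # the attribute must at least exist
    if value is not None:
        return value in values
    if one_of is not None:
        return any(candidate in values for candidate in one_of)
    return True  # bare `attribute` requirement: presence is enough


# e.g. require membership of any one of several IdP groups:
assert requirement_met({"groups": ["staff"]}, "groups", one_of=["staff", "admin"])
```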
@@ -19,7 +19,7 @@ # # import logging -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional import attr @@ -43,13 +43,18 @@ class SsoAttributeRequirement: """Object describing a single requirement for SSO attributes.""" attribute: str - # If a value is not given, than the attribute must simply exist. - value: Optional[str] + # If neither `value` nor `one_of` is given, the attribute must simply exist. + value: Optional[str] = None + one_of: Optional[List[str]] = None JSON_SCHEMA = { "type": "object", - "properties": {"attribute": {"type": "string"}, "value": {"type": "string"}}, - "required": ["attribute", "value"], + "properties": { + "attribute": {"type": "string"}, + "value": {"type": "string"}, + "one_of": {"type": "array", "items": {"type": "string"}}, + }, + "required": ["attribute"], } diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 51dc15eb61..a48d81fdc3 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py
@@ -108,8 +108,7 @@ class TlsConfig(Config): # Raise an error if this option has been specified without any # corresponding certificates. raise ConfigError( - "federation_custom_ca_list specified without " - "any certificate files" + "federation_custom_ca_list specified without any certificate files" ) certs = [] diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index c67796906f..fe4e2dc65c 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py
@@ -38,6 +38,9 @@ class UserDirectoryConfig(Config): self.user_directory_search_all_users = user_directory_config.get( "search_all_users", False ) + self.user_directory_exclude_remote_users = user_directory_config.get( + "exclude_remote_users", False + ) self.user_directory_search_prefer_local_users = user_directory_config.get( "prefer_local_users", False ) diff --git a/synapse/config/user_types.py b/synapse/config/user_types.py new file mode 100644
index 0000000000..2d9c9f7afb --- /dev/null +++ b/synapse/config/user_types.py
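The new `synapse/config/user_types.py` below validates `default_user_type` against the union of the built-in user types and `extra_user_types`. A minimal sketch of that validation, assuming the built-in types are `support` and `bot` (a stand-in for `UserTypes.ALL_BUILTIN_USER_TYPES`):

```python
from typing import List, Optional

BUILTIN_USER_TYPES = ["support", "bot"]  # assumed stand-in for the real constant


def validate_user_types(
    default_user_type: Optional[str], extra_user_types: List[str]
) -> List[str]:
    all_user_types = [*BUILTIN_USER_TYPES, *extra_user_types]
    if default_user_type is not None and default_user_type not in all_user_types:
        raise ValueError(
            f"Default user type {default_user_type} is not in the list of "
            f"all user types: {all_user_types}"
        )
    return all_user_types


validate_user_types("custom", ["custom"])  # accepted: "custom" is declared
# validate_user_types("custom", [])        # would raise: undeclared default
```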
@@ -0,0 +1,44 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +from typing import Any, List, Optional + +from synapse.api.constants import UserTypes +from synapse.types import JsonDict + +from ._base import Config, ConfigError + + +class UserTypesConfig(Config): + section = "user_types" + + def read_config(self, config: JsonDict, **kwargs: Any) -> None: + user_types: JsonDict = config.get("user_types", {}) + + self.default_user_type: Optional[str] = user_types.get( + "default_user_type", None + ) + self.extra_user_types: List[str] = user_types.get("extra_user_types", []) + + all_user_types: List[str] = [] + all_user_types.extend(UserTypes.ALL_BUILTIN_USER_TYPES) + all_user_types.extend(self.extra_user_types) + + self.all_user_types = all_user_types + + if self.default_user_type is not None: + if self.default_user_type not in all_user_types: + raise ConfigError( + f"Default user type {self.default_user_type} is not in the list of all user types: {all_user_types}" + ) diff --git a/synapse/config/voip.py b/synapse/config/voip.py
index 6fe43a9e32..f33602d975 100644 --- a/synapse/config/voip.py +++ b/synapse/config/voip.py
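The VoIP hunk below adds `turn_shared_secret_path` alongside the existing inline secret. The shared secret is typically consumed in the "TURN REST API" style of ephemeral credentials (e.g. coturn's `use-auth-secret` mode): the username embeds an expiry timestamp and the password is an HMAC-SHA1 over it. A hedged sketch of that derivation; whether your TURN server expects exactly this format is an assumption to verify:

```python
import base64
import hashlib
import hmac
import time
from typing import Tuple


def turn_credentials(
    shared_secret: str, user_id: str, lifetime_s: int = 86400
) -> Tuple[str, str]:
    expiry = int(time.time()) + lifetime_s
    username = f"{expiry}:{user_id}"  # the TURN server reads the expiry back out
    digest = hmac.new(shared_secret.encode(), username.encode(), hashlib.sha1)
    password = base64.b64encode(digest.digest()).decode()
    return username, password
```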
@@ -23,15 +23,34 @@ from typing import Any from synapse.types import JsonDict -from ._base import Config +from ._base import Config, ConfigError, read_file + +CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ +You have configured both `turn_shared_secret` and `turn_shared_secret_path`. +These are mutually incompatible. +""" class VoipConfig(Config): section = "voip" - def read_config(self, config: JsonDict, **kwargs: Any) -> None: + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: self.turn_uris = config.get("turn_uris", []) self.turn_shared_secret = config.get("turn_shared_secret") + if self.turn_shared_secret and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("turn_shared_secret",), + ) + turn_shared_secret_path = config.get("turn_shared_secret_path") + if turn_shared_secret_path: + if self.turn_shared_secret: + raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR) + self.turn_shared_secret = read_file( + turn_shared_secret_path, ("turn_shared_secret_path",) + ).strip() self.turn_username = config.get("turn_username") self.turn_password = config.get("turn_password") self.turn_user_lifetime = self.parse_duration( diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 7ecf349e4a..1685468773 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py
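Besides adding `worker_replication_secret_path`, the worker-config hunk below consolidates the pydantic-v1 imports into `synapse._pydantic_compat`. This sketch shows the strict-validation style those models rely on, assuming pydantic 2.x with its bundled `pydantic.v1` shim (`InstanceLocation` is an illustrative stand-in, not Synapse's `InstanceLocationConfig`):

```python
from pydantic.v1 import BaseModel, Extra, StrictBool, StrictInt, StrictStr


class InstanceLocation(BaseModel):
    """Strict types reject coerced values; Extra.forbid rejects misspelt keys."""

    class Config:
        extra = Extra.forbid

    host: StrictStr
    port: StrictInt
    tls: StrictBool = False


InstanceLocation(host="worker1", port=8034)  # accepted
# InstanceLocation(host="worker1", port="8034")  # rejected: not a strict int
# InstanceLocation(host="worker1", port=8034, ts=True)  # rejected: unknown field
```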
@@ -22,26 +22,26 @@ import argparse import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Union import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, Extra, StrictBool, StrictInt, StrictStr -else: - from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr - +from synapse._pydantic_compat import ( + BaseModel, + Extra, + StrictBool, + StrictInt, + StrictStr, +) from synapse.config._base import ( Config, ConfigError, RoutableShardedWorkerHandlingConfig, ShardedWorkerHandlingConfig, + read_file, ) from synapse.config._util import parse_and_validate_mapping from synapse.config.server import ( - DIRECT_TCP_ERROR, TCPListenerConfig, parse_listener_def, ) @@ -65,6 +65,11 @@ configuration under `main` inside the `instance_map`. See workers documentation `https://element-hq.github.io/synapse/latest/workers.html#worker-configuration` """ +CONFLICTING_WORKER_REPLICATION_SECRET_OPTS_ERROR = """\ +Conflicting options 'worker_replication_secret' and +'worker_replication_secret_path' are both defined in config file. +""" + # This allows for a handy knob when it's time to change from 'master' to # something with less 'history' MAIN_PROCESS_INSTANCE_NAME = "master" @@ -218,7 +223,9 @@ class WorkerConfig(Config): section = "worker" - def read_config(self, config: JsonDict, **kwargs: Any) -> None: + def read_config( + self, config: JsonDict, allow_secrets_in_config: bool, **kwargs: Any + ) -> None: self.worker_app = config.get("worker_app") # Canonicalise worker_app so that master always has None @@ -237,12 +244,24 @@ class WorkerConfig(Config): raise ConfigError("worker_log_config must be a string") self.worker_log_config = worker_log_config - # The port on the main synapse for TCP replication - if "worker_replication_port" in config: - raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",)) - # The shared secret used for authentication when connecting to the main synapse. - self.worker_replication_secret = config.get("worker_replication_secret", None) + worker_replication_secret = config.get("worker_replication_secret", None) + if worker_replication_secret and not allow_secrets_in_config: + raise ConfigError( + "Config options that expect an in-line secret as value are disabled", + ("worker_replication_secret",), + ) + worker_replication_secret_path = config.get( + "worker_replication_secret_path", None + ) + if worker_replication_secret_path: + if worker_replication_secret: + raise ConfigError(CONFLICTING_WORKER_REPLICATION_SECRET_OPTS_ERROR) + self.worker_replication_secret = read_file( + worker_replication_secret_path, ("worker_replication_secret_path",) + ).strip() + else: + self.worker_replication_secret = worker_replication_secret self.worker_name = config.get("worker_name", self.worker_app) self.instance_name = self.worker_name or MAIN_PROCESS_INSTANCE_NAME @@ -328,10 +347,11 @@ class WorkerConfig(Config): ) # type-ignore: the expression `Union[A, B]` is not a Type[Union[A, B]] currently - self.instance_map: Dict[ - str, InstanceLocationConfig - ] = parse_and_validate_mapping( - instance_map, InstanceLocationConfig # type: ignore[arg-type] + self.instance_map: Dict[str, InstanceLocationConfig] = ( + parse_and_validate_mapping( + instance_map, + InstanceLocationConfig, # type: ignore[arg-type] + ) ) # Map from type of streams to source, c.f. WriterLocations. 
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 8c301e077c..643d2d4e66 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py
@@ -589,7 +589,7 @@ class BaseV2KeyFetcher(KeyFetcher): % (server_name,) ) - for key_id, key_data in response_json["old_verify_keys"].items(): + for key_id, key_data in response_json.get("old_verify_keys", {}).items(): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index f5abcde2db..5999c264dc 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py
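The `_can_send_event` change below implements MSC3757-style "owned" state keys: in rooms with `msc3757_enabled`, a state key such as `@alice:example.org_device1` belongs to `@alice:example.org`, and only that user (or someone with a higher power level) may set it. A standalone sketch of the owner-extraction logic in the hunk, with the full `UserID.is_valid` check simplified away:

```python
def state_key_owner(state_key: str) -> str:
    """Extract the owning user ID: everything up to the first underscore
    after the domain separator (or the whole key if there is no suffix)."""
    if not state_key.startswith("@"):
        raise ValueError("not a user-owned state key")
    colon_idx = state_key.index(":", 1)  # raises ValueError without a domain part
    suffix_idx = state_key.find("_", colon_idx + 1)
    return state_key[:suffix_idx] if suffix_idx != -1 else state_key


assert state_key_owner("@alice:example.org") == "@alice:example.org"
assert state_key_owner("@alice:example.org_device1") == "@alice:example.org"
```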
@@ -32,6 +32,7 @@ from typing import ( Mapping, MutableMapping, Optional, + Protocol, Set, Tuple, Union, @@ -41,7 +42,6 @@ from typing import ( from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from signedjson.sign import SignatureVerifyException, verify_signed_json -from typing_extensions import Protocol from unpaddedbase64 import decode_base64 from synapse.api.constants import ( @@ -388,6 +388,7 @@ LENIENT_EVENT_BYTE_LIMITS_ROOM_VERSIONS = { RoomVersions.V9, RoomVersions.V10, RoomVersions.MSC1767v10, + RoomVersions.MSC3757v10, } @@ -565,6 +566,7 @@ def _is_membership_change_allowed( logger.debug( "_is_membership_change_allowed: %s", { + "caller_membership": caller.membership if caller else None, "caller_in_room": caller_in_room, "caller_invited": caller_invited, "caller_knocked": caller_knocked, @@ -676,7 +678,8 @@ def _is_membership_change_allowed( and join_rule == JoinRules.KNOCK_RESTRICTED ) ): - if not caller_in_room and not caller_invited: + # You can only join the room if you are invited or are already in the room. + if not (caller_in_room or caller_invited): raise AuthError(403, "You are not invited to this room.") else: # TODO (erikj): may_join list @@ -790,9 +793,10 @@ def get_send_level( def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool: + state_key = event.get_state_key() power_levels_event = get_power_level_event(auth_events) - send_level = get_send_level(event.type, event.get("state_key"), power_levels_event) + send_level = get_send_level(event.type, state_key, power_levels_event) user_level = get_user_power_level(event.user_id, auth_events) if user_level < send_level: @@ -803,11 +807,34 @@ def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> b errcode=Codes.INSUFFICIENT_POWER, ) - # Check state_key - if hasattr(event, "state_key"): - if event.state_key.startswith("@"): - if event.state_key != event.user_id: - raise AuthError(403, "You are not allowed to set others state") + if ( + state_key is not None + and state_key.startswith("@") + and state_key != event.user_id + ): + if event.room_version.msc3757_enabled: + try: + colon_idx = state_key.index(":", 1) + suffix_idx = state_key.find("_", colon_idx + 1) + state_key_user_id = ( + state_key[:suffix_idx] if suffix_idx != -1 else state_key + ) + if not UserID.is_valid(state_key_user_id): + raise ValueError + except ValueError: + raise SynapseError( + 400, + "State key neither equals a valid user ID, nor starts with one plus an underscore", + errcode=Codes.BAD_JSON, + ) + if ( + # sender is owner of the state key + state_key_user_id == event.user_id + # sender has higher PL than the owner of the state key + or user_level > get_user_power_level(state_key_user_id, auth_events) + ): + return True + raise AuthError(403, "You are not allowed to set others state") return True @@ -887,7 +914,8 @@ def _check_power_levels( raise SynapseError(400, f"{v!r} must be an integer.") if k in {"events", "notifications", "users"}: if not isinstance(v, collections.abc.Mapping) or not all( - type(v) is int for v in v.values() # noqa: E721 + type(v) is int + for v in v.values() # noqa: E721 ): raise SynapseError( 400, @@ -958,8 +986,7 @@ def _check_power_levels( if old_level == user_level: raise AuthError( 403, - "You don't have permission to remove ops level equal " - "to your own", + "You don't have permission to remove ops level equal to your own", ) # Check if the old and new levels are greater than the user level diff --git 
a/synapse/events/__init__.py b/synapse/events/__init__.py
index 2e56b671f0..a85e66d6bf 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py
@@ -22,7 +22,6 @@ import abc import collections.abc -import os from typing import ( TYPE_CHECKING, Any, @@ -30,6 +29,7 @@ from typing import ( Generic, Iterable, List, + Literal, Optional, Tuple, Type, @@ -39,30 +39,29 @@ from typing import ( ) import attr -from typing_extensions import Literal from unpaddedbase64 import encode_base64 -from synapse.api.constants import RelationTypes +from synapse.api.constants import EventTypes, RelationTypes from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.synapse_rust.events import EventInternalMetadata from synapse.types import JsonDict, StrCollection from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze -from synapse.util.stringutils import strtobool if TYPE_CHECKING: from synapse.events.builder import EventBuilder -# Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents -# bugs where we accidentally share e.g. signature dicts. However, converting a -# dict to frozen_dicts is expensive. -# -# NOTE: This is overridden by the configuration by the Synapse worker apps, but -# for the sake of tests, it is set here while it cannot be configured on the -# homeserver object itself. -USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0")) +USE_FROZEN_DICTS = False +""" +Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents +bugs where we accidentally share e.g. signature dicts. However, converting a +dict to frozen_dicts is expensive. +NOTE: This is overridden by the configuration by the Synapse worker apps, but +for the sake of tests, it is set here because it cannot be configured on the +homeserver object itself. +""" T = TypeVar("T") @@ -325,12 +324,17 @@ class EventBase(metaclass=abc.ABCMeta): def __repr__(self) -> str: rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else "" + conditional_membership_string = "" + if self.get("type") == EventTypes.Member: + conditional_membership_string = f"membership={self.membership}, " + return ( f"<{self.__class__.__name__} " f"{rejection}" f"event_id={self.event_id}, " f"type={self.get('type')}, " f"state_key={self.get('state_key')}, " + f"{conditional_membership_string}" f"outlier={self.internal_metadata.is_outlier()}" ">" ) diff --git a/synapse/events/auto_accept_invites.py b/synapse/events/auto_accept_invites.py
index d88ec51d9d..4295107c47 100644 --- a/synapse/events/auto_accept_invites.py +++ b/synapse/events/auto_accept_invites.py
@@ -66,49 +66,66 @@ class InviteAutoAccepter: event: The incoming event. """ # Check if the event is an invite for a local user. - is_invite_for_local_user = ( - event.type == EventTypes.Member - and event.is_state() - and event.membership == Membership.INVITE - and self._api.is_mine(event.state_key) - ) + if ( + event.type != EventTypes.Member + or event.is_state() is False + or event.membership != Membership.INVITE + or self._api.is_mine(event.state_key) is False + ): + return # Only accept invites for direct messages if the configuration mandates it. is_direct_message = event.content.get("is_direct", False) - is_allowed_by_direct_message_rules = ( - not self._config.accept_invites_only_for_direct_messages - or is_direct_message is True - ) + if ( + self._config.accept_invites_only_for_direct_messages + and is_direct_message is False + ): + return # Only accept invites from remote users if the configuration mandates it. is_from_local_user = self._api.is_mine(event.sender) - is_allowed_by_local_user_rules = ( - not self._config.accept_invites_only_from_local_users - or is_from_local_user is True - ) - if ( - is_invite_for_local_user - and is_allowed_by_direct_message_rules - and is_allowed_by_local_user_rules + self._config.accept_invites_only_from_local_users + and is_from_local_user is False ): - # Make the user join the room. We run this as a background process to circumvent a race condition - # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12) - run_as_background_process( - "retry_make_join", - self._retry_make_join, - event.state_key, - event.state_key, - event.room_id, - "join", - bg_start_span=False, - ) + return - if is_direct_message: - # Mark this room as a direct message! - await self._mark_room_as_direct_message( - event.state_key, event.sender, event.room_id - ) + # Check the user is activated. + recipient = await self._api.get_userinfo_by_id(event.state_key) + + # Ignore if the user doesn't exist. + if recipient is None: + return + + # Never accept invites for deactivated users. + if recipient.is_deactivated: + return + + # Never accept invites for suspended users. + if recipient.suspended: + return + + # Never accept invites for locked users. + if recipient.locked: + return + + # Make the user join the room. We run this as a background process to circumvent a race condition + # that occurs when responding to invites over federation (see https://github.com/matrix-org/synapse-auto-accept-invite/issues/12) + run_as_background_process( + "retry_make_join", + self._retry_make_join, + event.state_key, + event.state_key, + event.room_id, + "join", + bg_start_span=False, + ) + + if is_direct_message: + # Mark this room as a direct message! + await self._mark_room_as_direct_message( + event.state_key, event.sender, event.room_id + ) async def _mark_room_as_direct_message( self, user_id: str, dm_user_id: str, room_id: str diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 10ef01131b..76df083d69 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py
@@ -24,7 +24,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import attr from signedjson.types import SigningKey -from synapse.api.constants import MAX_DEPTH +from synapse.api.constants import MAX_DEPTH, EventTypes from synapse.api.room_versions import ( KNOWN_EVENT_FORMAT_VERSIONS, EventFormatVersions, @@ -109,6 +109,19 @@ class EventBuilder: def is_state(self) -> bool: return self._state_key is not None + def is_mine_id(self, user_id: str) -> bool: + """Determines whether a user ID or room alias originates from this homeserver. + + Returns: + `True` if the hostname part of the user ID or room alias matches this + homeserver. + `False` otherwise, or if the user ID or room alias is malformed. + """ + localpart_hostname = user_id.split(":", 1) + if len(localpart_hostname) < 2: + return False + return localpart_hostname[1] == self._hostname + async def build( self, prev_event_ids: List[str], @@ -142,6 +155,46 @@ class EventBuilder: self, state_ids ) + # Check for out-of-band membership that may have been exposed on `/sync` but + # the events have not been de-outliered yet so they won't be part of the + # room state yet. + # + # This helps in situations where a remote homeserver invites a local user to + # a room that we're already participating in; and we've persisted the invite + # as an out-of-band membership (outlier), but it hasn't been pushed to us as + # part of a `/send` transaction yet and de-outliered. This also helps for + # any of the other out-of-band membership transitions. + # + # As an optimization, we could check if the room state already includes a + # non-`leave` membership event, then we can assume the membership event has + # been de-outliered and we don't need to check for an out-of-band + # membership. But we don't have the necessary information from a + # `StateMap[str]` and we'll just have to take the hit of this extra lookup + # for any membership event for now. + if self.type == EventTypes.Member and self.is_mine_id(self.state_key): + ( + _membership, + member_event_id, + ) = await self._store.get_local_current_membership_for_user_in_room( + user_id=self.state_key, + room_id=self.room_id, + ) + # There is no need to check if the membership is actually an + # out-of-band membership (`outlier`) as we would end up with the + # same result either way (adding the member event to the + # `auth_event_ids`). + if ( + member_event_id is not None + # We only need to be careful about duplicating the event in the + # `auth_event_ids` list (duplicate `type`/`state_key` is part of the + # authorization rules) + and member_event_id not in auth_event_ids + ): + auth_event_ids.append(member_event_id) + # Also make sure to point to the previous membership event that will + # allow this one to happen so the computed state works out. + prev_event_ids.append(member_event_id) + format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. prev_events: Union[StrCollection, List[Tuple[str, Dict[str, str]]]] diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py
index 9cb053cd8e..9713b141bc 100644 --- a/synapse/events/presence_router.py +++ b/synapse/events/presence_router.py
@@ -80,7 +80,7 @@ def load_legacy_presence_router(hs: "HomeServer") -> None: # All methods that the module provides should be async, but this wasn't enforced # in the old module system, so we wrap them if needed def async_wrapper( - f: Optional[Callable[P, R]] + f: Optional[Callable[P, R]], ) -> Optional[Callable[P, Awaitable[R]]]: # f might be None if the callback isn't implemented by the module. In this # case we don't want to register a callback at all so we return None. diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 6b70ea94d1..0bca4c188b 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py
@@ -248,7 +248,7 @@ class EventContext(UnpersistedEventContextBase): @tag_args async def get_current_state_ids( self, state_filter: Optional["StateFilter"] = None - ) -> Optional[StateMap[str]]: + ) -> StateMap[str]: """ Gets the room state map, including this event - ie, the state in ``state_group`` @@ -256,13 +256,12 @@ class EventContext(UnpersistedEventContextBase): not make it into the room state. This method will raise an exception if ``rejected`` is set. + It is also an error to access this for an outlier event. + Arg: state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules Returns: - Returns None if state_group is None, which happens when the associated - event is an outlier. - Maps a (type, state_key) to the event ID of the state event matching this tuple. """ @@ -300,7 +299,8 @@ class EventContext(UnpersistedEventContextBase): this tuple. """ - assert self.state_group_before_event is not None + if self.state_group_before_event is None: + return {} return await self._storage.state.get_state_ids_for_group( self.state_group_before_event, state_filter ) @@ -504,7 +504,7 @@ class UnpersistedEventContext(UnpersistedEventContextBase): def _encode_state_group_delta( - state_group_delta: Dict[Tuple[int, int], StateMap[str]] + state_group_delta: Dict[Tuple[int, int], StateMap[str]], ) -> List[Tuple[int, int, Optional[List[Tuple[str, str, str]]]]]: if not state_group_delta: return [] @@ -517,7 +517,7 @@ def _encode_state_group_delta( def _decode_state_group_delta( - input: List[Tuple[int, int, List[Tuple[str, str, str]]]] + input: List[Tuple[int, int, List[Tuple[str, str, str]]]], ) -> Dict[Tuple[int, int], StateMap[str]]: if not input: return {} @@ -544,7 +544,7 @@ def _encode_state_dict( def _decode_state_dict( - input: Optional[List[Tuple[str, str, str]]] + input: Optional[List[Tuple[str, str, str]]], ) -> Optional[StateMap[str]]: """Decodes a state dict encoded using `_encode_state_dict` above""" if input is None: diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 54f94add4d..eb18ba2db7 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py
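The hunk below moves `CANONICALJSON_MAX_INT`/`CANONICALJSON_MIN_INT` into `synapse.api.constants`. The bounds exist because canonical JSON only permits integers that an IEEE-754 double can represent exactly; a short worked check:

```python
CANONICALJSON_MAX_INT = (2**53) - 1  # same definition the hunk relocates
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT


def check_canonicaljson_int(value: int) -> None:
    if not (CANONICALJSON_MIN_INT <= value <= CANONICALJSON_MAX_INT):
        raise ValueError(f"{value!r} is outside the canonical JSON integer range")


check_canonicaljson_int(2**53 - 1)  # fine: exactly representable as a double
# check_canonicaljson_int(2**53)    # would raise: beyond this point consecutive
#                                   # integers are no longer distinct in float64
```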
@@ -40,6 +40,8 @@ import attr from canonicaljson import encode_canonical_json from synapse.api.constants import ( + CANONICALJSON_MAX_INT, + CANONICALJSON_MIN_INT, MAX_PDU_SIZE, EventContentFields, EventTypes, @@ -61,9 +63,6 @@ SPLIT_FIELD_REGEX = re.compile(r"\\*\.") # Find escaped characters, e.g. those with a \ in front of them. ESCAPE_SEQUENCE_PATTERN = re.compile(r"\\(.)") -CANONICALJSON_MAX_INT = (2**53) - 1 -CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT - # Module API callback that allows adding fields to the unsigned section of # events that are sent to clients. diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 73b63b77f2..d1fb026cd6 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py
@@ -19,17 +19,11 @@ # # import collections.abc -from typing import TYPE_CHECKING, List, Type, Union, cast +from typing import List, Type, Union, cast import jsonschema -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Field, StrictBool, StrictStr -else: - from pydantic import Field, StrictBool, StrictStr - +from synapse._pydantic_compat import Field, StrictBool, StrictStr from synapse.api.constants import ( MAX_ALIAS_LENGTH, EventContentFields, @@ -92,9 +86,7 @@ class EventValidator: # Depending on the room version, ensure the data is spec compliant JSON. if event.room_version.strict_canonicaljson: - # Note that only the client controlled portion of the event is - # checked, since we trust the portions of the event we created. - validate_canonicaljson(event.content) + validate_canonicaljson(event.get_pdu_json()) if event.type == EventTypes.Aliases: if "aliases" in event.content: diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py
index a571eff590..61e28bff66 100644 --- a/synapse/federation/__init__.py +++ b/synapse/federation/__init__.py
@@ -19,5 +19,4 @@ # # -""" This package includes all the federation specific logic. -""" +"""This package includes all the federation specific logic.""" diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index b101a389ef..45593430e8 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py
@@ -20,7 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Optional +from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Sequence from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.errors import Codes, SynapseError @@ -29,6 +29,8 @@ from synapse.crypto.event_signing import check_event_content_hash from synapse.crypto.keyring import Keyring from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import prune_event, validate_canonicaljson +from synapse.federation.units import filter_pdus_for_valid_depth +from synapse.handlers.room_policy import RoomPolicyHandler from synapse.http.servlet import assert_params_in_dict from synapse.logging.opentracing import log_kv, trace from synapse.types import JsonDict, get_domain_from_id @@ -63,6 +65,24 @@ class FederationBase: self._clock = hs.get_clock() self._storage_controllers = hs.get_storage_controllers() + # We need to define this lazily otherwise we get a cyclic dependency. + # self._policy_handler = hs.get_room_policy_handler() + self._policy_handler: Optional[RoomPolicyHandler] = None + + def _lazily_get_policy_handler(self) -> RoomPolicyHandler: + """Lazily get the room policy handler. + + This is required to avoid an import cycle: RoomPolicyHandler requires a + FederationClient, which requires a FederationBase, which requires a + RoomPolicyHandler. + + Returns: + RoomPolicyHandler: The room policy handler. + """ + if self._policy_handler is None: + self._policy_handler = self.hs.get_room_policy_handler() + return self._policy_handler + @trace async def _check_sigs_and_hash( self, @@ -79,6 +99,10 @@ class FederationBase: Also runs the event through the spam checker; if it fails, redacts the event and flags it as soft-failed. + Also checks that the event is allowed by the policy server, if the room uses + a policy server. If the event is not allowed, the event is flagged as + soft-failed but not redacted. + Args: room_version: The room version of the PDU pdu: the event to be checked @@ -144,6 +168,17 @@ class FederationBase: ) return redacted_event + policy_allowed = await self._lazily_get_policy_handler().is_event_allowed(pdu) + if not policy_allowed: + logger.warning( + "Event not allowed by policy server, soft-failing %s", pdu.event_id + ) + pdu.internal_metadata.soft_failed = True + # Note: we don't redact the event so admins can inspect the event after the + # fact. Other processes may redact the event, but that won't be applied to + # the database copy of the event until the server's config requires it. + return pdu + spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu) if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: @@ -267,6 +302,15 @@ def _is_invite_via_3pid(event: EventBase) -> bool: ) +def parse_events_from_pdu_json( + pdus_json: Sequence[JsonDict], room_version: RoomVersion +) -> List[EventBase]: + return [ + event_from_pdu_json(pdu_json, room_version) + for pdu_json in filter_pdus_for_valid_depth(pdus_json) + ] + + def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase: """Construct an EventBase from an event json received over federation diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 7d80ff6998..7c485aa7e0 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py
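The client hunk below adds `get_pdu_policy_recommendation`, which deliberately fails open: transport errors and unknown values both collapse to `RECOMMENDATION_OK`. A hedged caller-side sketch of that contract; the string values of the two constants are assumptions here, and `federation_client` stands for any object exposing the new method:

```python
RECOMMENDATION_OK = "ok"      # assumed value of the imported constant
RECOMMENDATION_SPAM = "spam"  # assumed value of the imported constant


async def event_allowed_by_policy_server(
    federation_client, policy_server: str, pdu
) -> bool:
    """True unless the policy server positively flags the event as spam."""
    recommendation = await federation_client.get_pdu_policy_recommendation(
        policy_server, pdu, timeout=10_000
    )
    # Anything other than an explicit spam verdict (including errors, which the
    # method already converts to RECOMMENDATION_OK) lets the event through.
    return recommendation != RECOMMENDATION_SPAM
```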
@@ -68,12 +68,14 @@ from synapse.federation.federation_base import ( FederationBase, InvalidEventSignatureError, event_from_pdu_json, + parse_events_from_pdu_json, ) from synapse.federation.transport.client import SendJoinResponse from synapse.http.client import is_unknown_endpoint from synapse.http.types import QueryParams from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace from synapse.types import JsonDict, StrCollection, UserID, get_domain_from_id +from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -349,7 +351,7 @@ class FederationClient(FederationBase): room_version = await self.store.get_room_version(room_id) - pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus] + pdus = parse_events_from_pdu_json(transaction_data_pdus, room_version) # Check signatures and hash of pdus, removing any from the list that fail checks pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch( @@ -393,9 +395,7 @@ class FederationClient(FederationBase): transaction_data, ) - pdu_list: List[EventBase] = [ - event_from_pdu_json(p, room_version) for p in transaction_data["pdus"] - ] + pdu_list = parse_events_from_pdu_json(transaction_data["pdus"], room_version) if pdu_list and pdu_list[0]: pdu = pdu_list[0] @@ -424,6 +424,62 @@ class FederationClient(FederationBase): @trace @tag_args + async def get_pdu_policy_recommendation( + self, destination: str, pdu: EventBase, timeout: Optional[int] = None + ) -> str: + """Requests that the destination server (typically a policy server) + check the event and return its recommendation on how to handle the + event. + + If the policy server could not be contacted or the policy server + returned an unknown recommendation, this returns an OK recommendation. + This type fixing behaviour is done because the typical caller will be + in a critical call path and would generally interpret a `None` or similar + response as "weird value; don't care; move on without taking action". We + just frontload that logic here. + + + Args: + destination: The remote homeserver to ask (a policy server) + pdu: The event to check + timeout: How long to try (in ms) the destination for before + giving up. None indicates no timeout. + + Returns: + The policy recommendation, or RECOMMENDATION_OK if the policy server was + uncontactable or returned an unknown recommendation. 
+ """ + + logger.debug( + "get_pdu_policy_recommendation for event_id=%s from %s", + pdu.event_id, + destination, + ) + + try: + res = await self.transport_layer.get_policy_recommendation_for_pdu( + destination, pdu, timeout=timeout + ) + recommendation = res.get("recommendation") + if not isinstance(recommendation, str): + raise InvalidResponseError("recommendation is not a string") + if recommendation not in (RECOMMENDATION_OK, RECOMMENDATION_SPAM): + logger.warning( + "get_pdu_policy_recommendation: unknown recommendation: %s", + recommendation, + ) + return RECOMMENDATION_OK + return recommendation + except Exception as e: + logger.warning( + "get_pdu_policy_recommendation: server %s responded with error, assuming OK recommendation: %s", + destination, + e, + ) + return RECOMMENDATION_OK + + @trace + @tag_args async def get_pdu( self, destinations: Collection[str], @@ -809,7 +865,7 @@ class FederationClient(FederationBase): room_version = await self.store.get_room_version(room_id) - auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]] + auth_chain = parse_events_from_pdu_json(res["auth_chain"], room_version) signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch( destination, auth_chain, room_version=room_version @@ -1529,9 +1585,7 @@ class FederationClient(FederationBase): room_version = await self.store.get_room_version(room_id) - events = [ - event_from_pdu_json(e, room_version) for e in content.get("events", []) - ] + events = parse_events_from_pdu_json(content.get("events", []), room_version) signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch( destination, events, room_version=room_version diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 1932fa82a4..2f2c78babc 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py
@@ -66,7 +66,7 @@ from synapse.federation.federation_base import ( event_from_pdu_json, ) from synapse.federation.persistence import TransactionActions -from synapse.federation.units import Edu, Transaction +from synapse.federation.units import Edu, Transaction, serialize_and_filter_pdus from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import ( @@ -469,7 +469,12 @@ class FederationServer(FederationBase): logger.info("Ignoring PDU: %s", e) continue - event = event_from_pdu_json(p, room_version) + try: + event = event_from_pdu_json(p, room_version) + except SynapseError as e: + logger.info("Ignoring PDU for failing to deserialize: %s", e) + continue + pdus_by_room.setdefault(room_id, []).append(event) if event.origin_server_ts > newest_pdu_ts: @@ -636,8 +641,8 @@ class FederationServer(FederationBase): ) return { - "pdus": [pdu.get_pdu_json() for pdu in pdus], - "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], + "pdus": serialize_and_filter_pdus(pdus), + "auth_chain": serialize_and_filter_pdus(auth_chain), } async def on_pdu_request( @@ -696,6 +701,12 @@ class FederationServer(FederationBase): pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) + if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu): + logger.info( + "Federated event contains spam, dropping %s", + pdu.event_id, + ) + raise SynapseError(403, Codes.FORBIDDEN) try: pdu = await self._check_sigs_and_hash(room_version, pdu) except InvalidEventSignatureError as e: @@ -761,8 +772,8 @@ class FederationServer(FederationBase): event_json = event.get_pdu_json(time_now) resp = { "event": event_json, - "state": [p.get_pdu_json(time_now) for p in state_events], - "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events], + "state": serialize_and_filter_pdus(state_events, time_now), + "auth_chain": serialize_and_filter_pdus(auth_chain_events, time_now), "members_omitted": caller_supports_partial_state, } @@ -1005,7 +1016,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() auth_pdus = await self.handler.on_event_auth(event_id) - res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} + res = {"auth_chain": serialize_and_filter_pdus(auth_pdus, time_now)} return 200, res async def on_query_client_keys( @@ -1090,7 +1101,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() - return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} + return {"events": serialize_and_filter_pdus(missing_events, time_now)} async def on_openid_userinfo(self, token: str) -> Optional[str]: ts_now_ms = self._clock.time_msec() diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index 0bfde00315..8340b48503 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py
@@ -20,7 +20,7 @@ # # -""" This module contains all the persistence actions done by the federation +"""This module contains all the persistence actions done by the federation package. These actions are mostly only used by the :py:mod:`.replication` module. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 1888480881..2eef7b707d 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py
@@ -139,14 +139,13 @@ from typing import ( Hashable, Iterable, List, + Literal, Optional, - Set, Tuple, ) import attr from prometheus_client import Counter -from typing_extensions import Literal from twisted.internet import defer @@ -170,7 +169,13 @@ from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, ) -from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection +from synapse.types import ( + JsonDict, + ReadReceipt, + RoomStreamToken, + StrCollection, + get_domain_from_id, +) from synapse.util import Clock from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter @@ -297,12 +302,10 @@ class _DestinationWakeupQueue: # being woken up. _MAX_TIME_IN_QUEUE = 30.0 - # The maximum duration in seconds between waking up consecutive destination - # queues. - _MAX_DELAY = 0.1 - sender: "FederationSender" = attr.ib() clock: Clock = attr.ib() + max_delay_s: int = attr.ib() + queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict) processing: bool = attr.ib(default=False) @@ -332,13 +335,15 @@ class _DestinationWakeupQueue: # We also add an upper bound to the delay, to gracefully handle the # case where the queue only has a few entries in it. current_sleep_seconds = min( - self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue) + self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue) ) while self.queue: destination, _ = self.queue.popitem(last=False) queue = self.sender._get_per_destination_queue(destination) + if queue is None: + continue if not queue._new_data_to_send: # The per destination queue has already been woken up. @@ -416,19 +421,14 @@ class FederationSender(AbstractFederationSender): self._is_processing = False self._last_poked_id = -1 - # map from room_id to a set of PerDestinationQueues which we believe are - # awaiting a call to flush_read_receipts_for_room. The presence of an entry - # here for a given room means that we are rate-limiting RR flushes to that room, - # and that there is a pending call to _flush_rrs_for_room in the system. - self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {} + self._external_cache = hs.get_external_cache() - self._rr_txn_interval_per_room_ms = ( - 1000.0 - / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second + rr_txn_interval_per_room_s = ( + 1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second + ) + self._destination_wakeup_queue = _DestinationWakeupQueue( + self, self.clock, max_delay_s=rr_txn_interval_per_room_s ) - - self._external_cache = hs.get_external_cache() - self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock) # Regularly wake up destinations that have outstanding PDUs to be caught up self.clock.looping_call_now( @@ -438,12 +438,23 @@ class FederationSender(AbstractFederationSender): self._wake_destinations_needing_catchup, ) - def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: + def _get_per_destination_queue( + self, destination: str + ) -> Optional[PerDestinationQueue]: """Get or create a PerDestinationQueue for the given destination Args: destination: server_name of remote server + + Returns: + None if the destination is not allowed by the federation whitelist. + Otherwise a PerDestinationQueue for this destination. 
""" + if not self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist( + destination + ): + return None + queue = self._per_destination_queues.get(destination) if not queue: queue = PerDestinationQueue(self.hs, self._transaction_manager, destination) @@ -720,6 +731,16 @@ class FederationSender(AbstractFederationSender): # track the fact that we have a PDU for these destinations, # to allow us to perform catch-up later on if the remote is unreachable # for a while. + # Filter out any destinations not present in the federation_domain_whitelist, if + # the whitelist exists. These destinations should not be sent to so let's not + # waste time or space keeping track of events destined for them. + destinations = [ + d + for d in destinations + if self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist( + d + ) + ] await self.store.store_destination_rooms_entries( destinations, pdu.room_id, @@ -734,7 +755,12 @@ class FederationSender(AbstractFederationSender): ) for destination in destinations: - self._get_per_destination_queue(destination).send_pdu(pdu) + queue = self._get_per_destination_queue(destination) + # We expect `queue` to not be None as we already filtered out + # non-whitelisted destinations above. + assert queue is not None + + queue.send_pdu(pdu) async def send_read_receipt(self, receipt: ReadReceipt) -> None: """Send a RR to any other servers in the room @@ -745,37 +771,48 @@ class FederationSender(AbstractFederationSender): # Some background on the rate-limiting going on here. # - # It turns out that if we attempt to send out RRs as soon as we get them from - # a client, then we end up trying to do several hundred Hz of federation - # transactions. (The number of transactions scales as O(N^2) on the size of a - # room, since in a large room we have both more RRs coming in, and more servers - # to send them to.) + # It turns out that if we attempt to send out RRs as soon as we get them + # from a client, then we end up trying to do several hundred Hz of + # federation transactions. (The number of transactions scales as O(N^2) + # on the size of a room, since in a large room we have both more RRs + # coming in, and more servers to send them to.) # - # This leads to a lot of CPU load, and we end up getting behind. The solution - # currently adopted is as follows: + # This leads to a lot of CPU load, and we end up getting behind. The + # solution currently adopted is to differentiate between receipts and + # destinations we should immediately send to, and those we can trickle + # the receipts to. # - # The first receipt in a given room is sent out immediately, at time T0. Any - # further receipts are, in theory, batched up for N seconds, where N is calculated - # based on the number of servers in the room to achieve a transaction frequency - # of around 50Hz. So, for example, if there were 100 servers in the room, then - # N would be 100 / 50Hz = 2 seconds. + # The current logic is to send receipts out immediately if: + # - the room is "small", i.e. there's only N servers to send receipts + # to, and so sending out the receipts immediately doesn't cause too + # much load; or + # - the receipt is for an event that happened recently, as users + # notice if receipts are delayed when they know other users are + # currently reading the room; or + # - the receipt is being sent to the server that sent the event, so + # that users see receipts for their own receipts quickly. 
# - # Then, after T+N, we flush out any receipts that have accumulated, and restart - # the timer to flush out more receipts at T+2N, etc. If no receipts accumulate, - # we stop the cycle and go back to the start. + # For destinations that we should delay sending the receipt to, we queue + # the receipts up to be sent in the next transaction, but don't trigger + # a new transaction to be sent. We then add the destination to the + # `DestinationWakeupQueue`, which will slowly iterate over each + # destination and trigger a new transaction to be sent. # - # However, in practice, it is often possible to flush out receipts earlier: in - # particular, if we are sending a transaction to a given server anyway (for - # example, because we have a PDU or a RR in another room to send), then we may - # as well send out all of the pending RRs for that server. So it may be that - # by the time we get to T+N, we don't actually have any RRs left to send out. - # Nevertheless we continue to buffer up RRs for the room in question until we - # reach the point that no RRs arrive between timer ticks. + # However, in practice, it is often possible to send out delayed + # receipts earlier: in particular, if we are sending a transaction to a + # given server anyway (for example, because we have a PDU or a RR in + # another room to send), then we may as well send out all of the pending + # RRs for that server. So it may be that by the time we get to waking up + # the destination, we don't actually have any RRs left to send out. # - # For even more background, see https://github.com/matrix-org/synapse/issues/4730. + # For even more background, see + # https://github.com/matrix-org/synapse/issues/4730. room_id = receipt.room_id + # Local read receipts always have 1 event ID. + event_id = receipt.event_ids[0] + # Work out which remote servers should be poked and poke them. domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation( room_id @@ -797,49 +834,55 @@ class FederationSender(AbstractFederationSender): if not domains: return - queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id) + # We now split which domains we want to wake up immediately vs which we + # want to delay waking up. + immediate_domains: StrCollection + delay_domains: StrCollection - # if there is no flush yet scheduled, we will send out these receipts with - # immediate flushes, and schedule the next flush for this room. 
- if queues_pending_flush is not None: - logger.debug("Queuing receipt for: %r", domains) + if len(domains) < 10: + # For "small" rooms send to all domains immediately + immediate_domains = domains + delay_domains = () else: - logger.debug("Sending receipt to: %r", domains) - self._schedule_rr_flush_for_room(room_id, len(domains)) + metadata = await self.store.get_metadata_for_event( + receipt.room_id, event_id + ) + assert metadata is not None - for domain in domains: - queue = self._get_per_destination_queue(domain) - queue.queue_read_receipt(receipt) + sender_domain = get_domain_from_id(metadata.sender) - # if there is already a RR flush pending for this room, then make sure this - # destination is registered for the flush - if queues_pending_flush is not None: - queues_pending_flush.add(queue) + if self.clock.time_msec() - metadata.received_ts < 60_000: + # We always send receipts for recent messages immediately + immediate_domains = domains + delay_domains = () else: - queue.flush_read_receipts_for_room(room_id) - - def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None: - # that is going to cause approximately len(domains) transactions, so now back - # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM - backoff_ms = self._rr_txn_interval_per_room_ms * n_domains - - logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms) - self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id) - self._queues_awaiting_rr_flush_by_room[room_id] = set() - - def _flush_rrs_for_room(self, room_id: str) -> None: - queues = self._queues_awaiting_rr_flush_by_room.pop(room_id) - logger.debug("Flushing RRs in %s to %s", room_id, queues) - - if not queues: - # no more RRs arrived for this room; we are done. - return + # Otherwise, we delay waking up all destinations except for the + # sender's domain. + immediate_domains = [] + delay_domains = [] + for domain in domains: + if domain == sender_domain: + immediate_domains.append(domain) + else: + delay_domains.append(domain) + + for domain in immediate_domains: + # Add to destination queue and wake the destination up + queue = self._get_per_destination_queue(domain) + if queue is None: + continue + queue.queue_read_receipt(receipt) + queue.attempt_new_transaction() - # schedule the next flush - self._schedule_rr_flush_for_room(room_id, len(queues)) + for domain in delay_domains: + # Add to destination queue... + queue = self._get_per_destination_queue(domain) + if queue is None: + continue + queue.queue_read_receipt(receipt) - for queue in queues: - queue.flush_read_receipts_for_room(room_id) + # ... and schedule the destination to be woken up. 
+ self._destination_wakeup_queue.add_to_queue(domain) async def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] @@ -871,9 +914,10 @@ class FederationSender(AbstractFederationSender): if self.is_mine_server_name(destination): continue - self._get_per_destination_queue(destination).send_presence( - states, start_loop=False - ) + queue = self._get_per_destination_queue(destination) + if queue is None: + continue + queue.send_presence(states, start_loop=False) self._destination_wakeup_queue.add_to_queue(destination) @@ -923,6 +967,8 @@ class FederationSender(AbstractFederationSender): return queue = self._get_per_destination_queue(edu.destination) + if queue is None: + return if key: queue.send_keyed_edu(edu, key) else: @@ -947,9 +993,15 @@ class FederationSender(AbstractFederationSender): for destination in destinations: if immediate: - self._get_per_destination_queue(destination).attempt_new_transaction() + queue = self._get_per_destination_queue(destination) + if queue is None: + continue + queue.attempt_new_transaction() else: - self._get_per_destination_queue(destination).mark_new_data() + queue = self._get_per_destination_queue(destination) + if queue is None: + continue + queue.mark_new_data() self._destination_wakeup_queue.add_to_queue(destination) def wake_destination(self, destination: str) -> None: @@ -968,7 +1020,9 @@ class FederationSender(AbstractFederationSender): ): return - self._get_per_destination_queue(destination).attempt_new_transaction() + queue = self._get_per_destination_queue(destination) + if queue is not None: + queue.attempt_new_transaction() @staticmethod def get_current_token() -> int: @@ -1013,6 +1067,9 @@ class FederationSender(AbstractFederationSender): d for d in destinations_to_wake if self._federation_shard_config.should_handle(self._instance_name, d) + and self.hs.config.federation.is_domain_allowed_according_to_federation_whitelist( + d + ) ] for destination in destinations_to_wake: diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
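To summarise the new read-receipt strategy in `send_read_receipt`: receipts go out immediately for small rooms, for recent events, and to the event sender's own server; all other destinations are trickled via the `_DestinationWakeupQueue`. A pure-function restatement of that split, for illustration only (names are invented; the thresholds mirror the `len(domains) < 10` and one-minute checks above):

from typing import Iterable, List, Tuple

SMALL_ROOM_THRESHOLD = 10        # mirrors `len(domains) < 10` above
RECENT_EVENT_WINDOW_MS = 60_000  # mirrors the one-minute check above


def partition_receipt_domains(
    domains: Iterable[str],
    sender_domain: str,
    event_age_ms: int,
) -> Tuple[List[str], List[str]]:
    """Split destinations into (send immediately, wake up lazily)."""
    domains = list(domains)
    if len(domains) < SMALL_ROOM_THRESHOLD:
        # Small room: sending everything now is cheap.
        return domains, []
    if event_age_ms < RECENT_EVENT_WINDOW_MS:
        # Recent event: users notice delayed receipts while actively reading.
        return domains, []
    # Otherwise only the sender's own server hears about it straight away.
    immediate = [d for d in domains if d == sender_domain]
    delayed = [d for d in domains if d != sender_domain]
    return immediate, delayed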
index d097e65ea7..b3f65e8237 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py
@@ -156,7 +156,6 @@ class PerDestinationQueue: # Each receipt can only have a single receipt per # (room ID, receipt type, user ID, thread ID) tuple. self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = [] - self._rrs_pending_flush = False # stream_id of last successfully sent to-device message. # NB: may be a long or an int. @@ -258,15 +257,7 @@ class PerDestinationQueue: } ) - def flush_read_receipts_for_room(self, room_id: str) -> None: - # If there are any pending receipts for this room then force-flush them - # in a new transaction. - for edu in self._pending_receipt_edus: - if room_id in edu: - self._rrs_pending_flush = True - self.attempt_new_transaction() - # No use in checking remaining EDUs if the room was found. - break + self.mark_new_data() def send_keyed_edu(self, edu: Edu, key: Hashable) -> None: self._pending_edus_keyed[(edu.edu_type, key)] = edu @@ -603,12 +594,9 @@ class PerDestinationQueue: self._destination, last_successful_stream_ordering ) - def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]: + def _get_receipt_edus(self, limit: int) -> Iterable[Edu]: if not self._pending_receipt_edus: return - if not force_flush and not self._rrs_pending_flush: - # not yet time for this lot - return # Send at most limit EDUs for receipts. for content in self._pending_receipt_edus[:limit]: @@ -747,7 +735,7 @@ class _TransactionQueueManager: ) # Add read receipt EDUs. - pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5)) + pending_edus.extend(self.queue._get_receipt_edus(limit=5)) edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus) # Next, prioritize to-device messages so that existing encryption channels @@ -795,13 +783,6 @@ class _TransactionQueueManager: if not self._pdus and not pending_edus: return [], [] - # if we've decided to send a transaction anyway, and we have room, we - # may as well send any pending RRs - if edu_limit: - pending_edus.extend( - self.queue._get_receipt_edus(force_flush=True, limit=edu_limit) - ) - if self._pdus: self._last_stream_ordering = self._pdus[ -1 diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 206e91ed14..62bf96ce91 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py
@@ -143,6 +143,33 @@ class TransportLayerClient: destination, path=path, timeout=timeout, try_trailing_slash_on_400=True ) + async def get_policy_recommendation_for_pdu( + self, destination: str, event: EventBase, timeout: Optional[int] = None + ) -> JsonDict: + """Requests the policy recommendation for the given pdu from the given policy server. + + Args: + destination: The host name of the remote homeserver checking the event. + event: The event to check. + timeout: How long to try (in ms) the destination for before giving up. + None indicates no timeout. + + Returns: + The full recommendation object from the remote server. + """ + logger.debug( + "get_policy_recommendation_for_pdu dest=%s, event_id=%s", + destination, + event.event_id, + ) + return await self.client.post_json( + destination=destination, + path=f"/_matrix/policy/unstable/org.matrix.msc4284/event/{event.event_id}/check", + data=event.get_pdu_json(), + ignore_backoff=True, + timeout=timeout, + ) + async def backfill( self, destination: str, room_id: str, event_tuples: Collection[str], limit: int ) -> Optional[Union[JsonDict, list]]: diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
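On the wire, `get_policy_recommendation_for_pdu` is a single POST of the event's PDU JSON to the policy server's MSC4284 check endpoint, answered with a small JSON verdict. A rough sketch of the exchange follows; real federation traffic goes through Synapse's signing HTTP client, which this illustration omits, and the server name is a placeholder.

# Unauthenticated sketch of the MSC4284 policy-check exchange; the real
# request is signed and sent via MatrixFederationHttpClient.
import json
import urllib.request


def check_event(policy_server: str, event_id: str, pdu_json: dict) -> str:
    url = (
        f"https://{policy_server}/_matrix/policy/unstable/"
        f"org.matrix.msc4284/event/{event_id}/check"
    )
    req = urllib.request.Request(
        url,
        data=json.dumps(pdu_json).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req, timeout=10) as resp:
        body = json.load(resp)
    # Expected shape, per the client code above: {"recommendation": "ok"}
    # (or "spam"); anything else is treated as OK by the caller.
    return body.get("recommendation", "ok")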
index 43102567db..174d02ab6b 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py
@@ -20,9 +20,7 @@ # # import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type - -from typing_extensions import Literal +from typing import TYPE_CHECKING, Dict, Iterable, List, Literal, Optional, Tuple, Type from synapse.api.errors import FederationDeniedError, SynapseError from synapse.federation.transport.server._base import ( diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py
index 9094201da0..cba309635b 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py
@@ -113,7 +113,7 @@ class Authenticator: ): raise AuthenticationError( HTTPStatus.UNAUTHORIZED, - "Destination mismatch in auth header", + f"Destination mismatch in auth header, received: {destination!r}", Codes.UNAUTHORIZED, ) if ( diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 20f87c885e..eb96ff27f9 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py
@@ -24,6 +24,7 @@ from typing import ( TYPE_CHECKING, Dict, List, + Literal, Mapping, Optional, Sequence, @@ -32,8 +33,6 @@ from typing import ( Union, ) -from typing_extensions import Literal - from synapse.api.constants import Direction, EduTypes from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersions @@ -509,6 +508,9 @@ class FederationV2InviteServlet(BaseFederationServerServlet): event = content["event"] invite_room_state = content.get("invite_room_state", []) + if not isinstance(invite_room_state, list): + invite_room_state = [] + # Synapse expects invite_room_state to be in unsigned, as it is in v1 # API @@ -859,7 +861,6 @@ class FederationMediaThumbnailServlet(BaseFederationServerServlet): request: SynapseRequest, media_id: str, ) -> None: - width = parse_integer(request, "width", required=True) height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") diff --git a/synapse/federation/units.py b/synapse/federation/units.py
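The `invite_room_state` guard in `FederationV2InviteServlet` is a small robustness fix: a remote server could previously send a non-list value that would then be copied into the event's `unsigned` section. A toy illustration of the coercion:

def sanitise_invite_room_state(content: dict) -> list:
    """Mirrors the guard above: any non-list value is replaced with []."""
    invite_room_state = content.get("invite_room_state", [])
    if not isinstance(invite_room_state, list):
        invite_room_state = []
    return invite_room_state


assert sanitise_invite_room_state({}) == []
assert sanitise_invite_room_state({"invite_room_state": {"not": "a list"}}) == []
assert sanitise_invite_room_state(
    {"invite_room_state": [{"type": "m.room.name"}]}
) == [{"type": "m.room.name"}]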
index b2c8ba5887..3bb5f824b7 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py
@@ -19,15 +19,17 @@ # # -""" Defines the JSON structure of the protocol units used by the server to +"""Defines the JSON structure of the protocol units used by the server to server protocol. """ import logging -from typing import List, Optional +from typing import List, Optional, Sequence import attr +from synapse.api.constants import CANONICALJSON_MAX_INT, CANONICALJSON_MIN_INT +from synapse.events import EventBase from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -104,8 +106,28 @@ class Transaction: result = { "origin": self.origin, "origin_server_ts": self.origin_server_ts, - "pdus": self.pdus, + "pdus": filter_pdus_for_valid_depth(self.pdus), } if self.edus: result["edus"] = self.edus return result + + +def filter_pdus_for_valid_depth(pdus: Sequence[JsonDict]) -> List[JsonDict]: + filtered_pdus = [] + for pdu in pdus: + # Drop PDUs that have a depth that is outside of the range allowed + # by canonical json. + if ( + "depth" in pdu + and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT + ): + filtered_pdus.append(pdu) + + return filtered_pdus + + +def serialize_and_filter_pdus( + pdus: Sequence[EventBase], time_now: Optional[int] = None +) -> List[JsonDict]: + return filter_pdus_for_valid_depth([pdu.get_pdu_json(time_now) for pdu in pdus]) diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py
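`filter_pdus_for_valid_depth` exists because canonical JSON only permits integers in the range exactly representable as IEEE-754 doubles, so a maliciously large `depth` would make an outgoing transaction unserialisable. A self-contained demonstration, assuming the usual values of the constants imported above (plus or minus 2**53 - 1, defined in synapse.api.constants):

# Assumed constant values for illustration.
CANONICALJSON_MAX_INT = 2**53 - 1
CANONICALJSON_MIN_INT = -CANONICALJSON_MAX_INT


def filter_pdus_for_valid_depth(pdus):
    # Keep only PDUs whose depth is present and within canonical-JSON range.
    return [
        pdu
        for pdu in pdus
        if "depth" in pdu
        and CANONICALJSON_MIN_INT <= pdu["depth"] <= CANONICALJSON_MAX_INT
    ]


pdus = [
    {"event_id": "$ok", "depth": 12},
    {"event_id": "$huge", "depth": 2**60},  # dropped: unrepresentable depth
    {"event_id": "$missing"},               # dropped: no depth field at all
]
assert filter_pdus_for_valid_depth(pdus) == [{"event_id": "$ok", "depth": 12}]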
index 89e944bc17..37cc3d3ff5 100644 --- a/synapse/handlers/account.py +++ b/synapse/handlers/account.py
@@ -118,10 +118,10 @@ class AccountHandler: } if self._use_account_validity_in_account_status: - status["org.matrix.expired"] = ( - await self._account_validity_handler.is_user_expired( - user_id.to_string() - ) + status[ + "org.matrix.expired" + ] = await self._account_validity_handler.is_user_expired( + user_id.to_string() ) return status diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 97a463d8d0..228132db48 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py
@@ -33,7 +33,7 @@ from synapse.replication.http.account_data import ( ReplicationRemoveUserAccountDataRestServlet, ) from synapse.streams import EventSource -from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID +from synapse.types import JsonDict, JsonMapping, StrCollection, StreamKeyType, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -253,7 +253,7 @@ class AccountDataHandler: return response["max_stream_id"] async def add_tag_to_room( - self, user_id: str, room_id: str, tag: str, content: JsonDict + self, user_id: str, room_id: str, tag: str, content: JsonMapping ) -> int: """Add a tag to a room for a user. diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 7004d95a0f..e40ca3e73f 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py
@@ -18,8 +18,6 @@ # # -import email.mime.multipart -import email.utils import logging from typing import TYPE_CHECKING, List, Optional, Tuple @@ -40,18 +38,13 @@ class AccountValidityHandler: self.hs = hs self.config = hs.config self.store = hs.get_datastores().main - self.send_email_handler = hs.get_send_email_handler() self.clock = hs.get_clock() - self._app_name = hs.config.email.email_app_name self._module_api_callbacks = hs.get_module_api_callbacks().account_validity self._account_validity_enabled = ( hs.config.account_validity.account_validity_enabled ) - self._account_validity_renew_by_email_enabled = ( - hs.config.account_validity.account_validity_renew_by_email_enabled - ) self._account_validity_period = None if self._account_validity_enabled: @@ -59,21 +52,6 @@ class AccountValidityHandler: hs.config.account_validity.account_validity_period ) - if ( - self._account_validity_enabled - and self._account_validity_renew_by_email_enabled - ): - # Don't do email-specific configuration if renewal by email is disabled. - self._template_html = hs.config.email.account_validity_template_html - self._template_text = hs.config.email.account_validity_template_text - self._renew_email_subject = ( - hs.config.account_validity.account_validity_renew_email_subject - ) - - # Check the renewal emails to send and send them every 30min. - if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) - async def is_user_expired(self, user_id: str) -> bool: """Checks if a user has expired against third-party modules. @@ -120,125 +98,6 @@ class AccountValidityHandler: for callback in self._module_api_callbacks.on_user_login_callbacks: await callback(user_id, auth_provider_type, auth_provider_id) - @wrap_as_background_process("send_renewals") - async def _send_renewal_emails(self) -> None: - """Gets the list of users whose account is expiring in the amount of time - configured in the ``renew_at`` parameter from the ``account_validity`` - configuration, and sends renewal emails to all of these users as long as they - have an email 3PID attached to their account. - """ - expiring_users = await self.store.get_users_expiring_soon() - - if expiring_users: - for user_id, expiration_ts_ms in expiring_users: - await self._send_renewal_email( - user_id=user_id, expiration_ts=expiration_ts_ms - ) - - async def send_renewal_email_to_user(self, user_id: str) -> None: - """ - Send a renewal email for a specific user. - - Args: - user_id: The user ID to send a renewal email for. - - Raises: - SynapseError if the user is not set to renew. - """ - # If a module supports sending a renewal email from here, do that, otherwise do - # the legacy dance. - if self._module_api_callbacks.on_legacy_send_mail_callback is not None: - await self._module_api_callbacks.on_legacy_send_mail_callback(user_id) - return - - if not self._account_validity_renew_by_email_enabled: - raise AuthError( - 403, "Account renewal via email is disabled on this server." - ) - - expiration_ts = await self.store.get_expiration_ts_for_user(user_id) - - # If this user isn't set to be expired, raise an error. - if expiration_ts is None: - raise SynapseError(400, "User has no expiration time: %s" % (user_id,)) - - await self._send_renewal_email(user_id, expiration_ts) - - async def _send_renewal_email(self, user_id: str, expiration_ts: int) -> None: - """Sends out a renewal email to every email address attached to the given user - with a unique link allowing them to renew their account. 
- - Args: - user_id: ID of the user to send email(s) to. - expiration_ts: Timestamp in milliseconds for the expiration date of - this user's account (used in the email templates). - """ - addresses = await self._get_email_addresses_for_user(user_id) - - # Stop right here if the user doesn't have at least one email address. - # In this case, they will have to ask their server admin to renew their - # account manually. - # We don't need to do a specific check to make sure the account isn't - # deactivated, as a deactivated account isn't supposed to have any - # email address attached to it. - if not addresses: - return - - try: - user_display_name = await self.store.get_profile_displayname( - UserID.from_string(user_id) - ) - if user_display_name is None: - user_display_name = user_id - except StoreError: - user_display_name = user_id - - renewal_token = await self._get_renewal_token(user_id) - url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % ( - self.hs.config.server.public_baseurl, - renewal_token, - ) - - template_vars = { - "display_name": user_display_name, - "expiration_ts": expiration_ts, - "url": url, - } - - html_text = self._template_html.render(**template_vars) - plain_text = self._template_text.render(**template_vars) - - for address in addresses: - raw_to = email.utils.parseaddr(address)[1] - - await self.send_email_handler.send_email( - email_address=raw_to, - subject=self._renew_email_subject, - app_name=self._app_name, - html=html_text, - text=plain_text, - ) - - await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) - - async def _get_email_addresses_for_user(self, user_id: str) -> List[str]: - """Retrieve the list of email addresses attached to a user's account. - - Args: - user_id: ID of the user to lookup email addresses for. - - Returns: - Email addresses for this account. - """ - threepids = await self.store.user_get_threepids(user_id) - - addresses = [] - for threepid in threepids: - if threepid.medium == "email": - addresses.append(threepid.address) - - return addresses - async def _get_renewal_token(self, user_id: str) -> str: """Generates a 32-byte long random string that will be inserted into the user's renewal email's unique link, then saves it into the database. diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index b44e862493..5467d129bd 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py
@@ -21,13 +21,34 @@ import abc import logging -from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, +) import attr -from synapse.api.constants import Direction, Membership +from synapse.api.constants import Direction, EventTypes, Membership +from synapse.api.errors import SynapseError from synapse.events import EventBase -from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo +from synapse.types import ( + JsonMapping, + Requester, + RoomStreamToken, + ScheduledTask, + StateMap, + TaskStatus, + UserID, + UserInfo, + create_requester, +) from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -35,6 +56,8 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +REDACT_ALL_EVENTS_ACTION_NAME = "redact_all_events" + class AdminHandler: def __init__(self, hs: "HomeServer"): @@ -43,6 +66,22 @@ class AdminHandler: self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state self._msc3866_enabled = hs.config.experimental.msc3866.enabled + self.event_creation_handler = hs.get_event_creation_handler() + self._task_scheduler = hs.get_task_scheduler() + + self._task_scheduler.register_action( + self._redact_all_events, REDACT_ALL_EVENTS_ACTION_NAME + ) + + self.hs = hs + + async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]: + """Get the current status of an active redaction process + + Args: + redact_id: redact_id returned by start_redact_events. + """ + return await self._task_scheduler.get_task(redact_id) async def get_whois(self, user: UserID) -> JsonMapping: connections = [] @@ -85,6 +124,7 @@ class AdminHandler: "consent_ts": user_info.consent_ts, "user_type": user_info.user_type, "is_guest": user_info.is_guest, + "suspended": user_info.suspended, } if self._msc3866_enabled: @@ -93,7 +133,6 @@ class AdminHandler: # Add additional user metadata profile = await self._store.get_profileinfo(user) - threepids = await self._store.user_get_threepids(user.to_string()) external_ids = [ ({"auth_provider": auth_provider, "external_id": external_id}) for auth_provider, external_id in await self._store.get_external_ids_by_user( @@ -102,7 +141,6 @@ class AdminHandler: ] user_info_dict["displayname"] = profile.display_name user_info_dict["avatar_url"] = profile.avatar_url - user_info_dict["threepids"] = [attr.asdict(t) for t in threepids] user_info_dict["external_ids"] = external_ids user_info_dict["erased"] = await self._store.is_user_erased(user.to_string()) @@ -197,14 +235,16 @@ class AdminHandler: # events that we have and then filtering, this isn't the most # efficient method perhaps but it does guarantee we get everything. 
while True: - events, _ = ( - await self._store.paginate_room_events_by_topological_ordering( - room_id=room_id, - from_key=from_key, - to_key=to_key, - limit=100, - direction=Direction.FORWARDS, - ) + ( + events, + _, + _, + ) = await self._store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_key, + to_key=to_key, + limit=100, + direction=Direction.FORWARDS, ) if not events: break @@ -311,6 +351,155 @@ class AdminHandler: return writer.finished() + async def start_redact_events( + self, + user_id: str, + rooms: list, + requester: JsonMapping, + reason: Optional[str], + limit: Optional[int], + ) -> str: + """ + Start a task redacting the events of the given user in the given rooms + + Args: + user_id: the user ID of the user whose events should be redacted + rooms: the rooms in which to redact the user's events + requester: the user requesting the events + reason: reason for requesting the redaction, ie spam, etc + limit: limit on the number of events in each room to redact + + Returns: + a unique ID which can be used to query the status of the task + """ + active_tasks = await self._task_scheduler.get_tasks( + actions=[REDACT_ALL_EVENTS_ACTION_NAME], + resource_id=user_id, + statuses=[TaskStatus.ACTIVE], + ) + + if len(active_tasks) > 0: + raise SynapseError( + 400, "Redact already in progress for user %s" % (user_id,) + ) + + if not limit: + limit = 1000 + + redact_id = await self._task_scheduler.schedule_task( + REDACT_ALL_EVENTS_ACTION_NAME, + resource_id=user_id, + params={ + "rooms": rooms, + "requester": requester, + "user_id": user_id, + "reason": reason, + "limit": limit, + }, + ) + + logger.info( + "starting redact events with redact_id %s", + redact_id, + ) + + return redact_id + + async def _redact_all_events( + self, task: ScheduledTask + ) -> Tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]: + """ + Task to redact all of a users events in the given rooms, tracking which, if any, events + whose redaction failed + """ + + assert task.params is not None + rooms = task.params.get("rooms") + assert rooms is not None + + r = task.params.get("requester") + assert r is not None + admin = Requester.deserialize(self._store, r) + + user_id = task.params.get("user_id") + assert user_id is not None + + # puppet the user if they're ours, otherwise use admin to redact + requester = create_requester( + user_id if self.hs.is_mine_id(user_id) else admin.user.to_string(), + authenticated_entity=admin.user.to_string(), + ) + + reason = task.params.get("reason") + limit = task.params.get("limit") + assert limit is not None + + result: Mapping[str, Any] = ( + task.result if task.result else {"failed_redactions": {}} + ) + for room in rooms: + room_version = await self._store.get_room_version(room) + event_ids = await self._store.get_events_sent_by_user_in_room( + user_id, + room, + limit, + ["m.room.member", "m.room.message", "m.room.encrypted"], + ) + if not event_ids: + # nothing to redact in this room + continue + + events = await self._store.get_events_as_list(event_ids) + for event in events: + # we care about join events but not other membership events + if event.type == "m.room.member": + content = event.content + if content: + if content.get("membership") == Membership.JOIN: + pass + else: + continue + relations = await self._store.get_relations_for_event( + room, event.event_id, event, event_type=EventTypes.Redaction + ) + + # if we've already successfully redacted this event then skip processing it + if relations[0]: + continue + + 
event_dict = { + "type": EventTypes.Redaction, + "content": {"reason": reason} if reason else {}, + "room_id": room, + "sender": requester.user.to_string(), + } + if room_version.updated_redaction_rules: + event_dict["content"]["redacts"] = event.event_id + else: + event_dict["redacts"] = event.event_id + + try: + # set the prev event to the offending message to allow for redactions + # to be processed in the case where the user has been kicked/banned before + # redactions are requested + ( + redaction, + _, + ) = await self.event_creation_handler.create_and_send_nonmember_event( + requester, + event_dict, + prev_event_ids=[event.event_id], + ratelimit=False, + ) + except Exception as ex: + logger.info( + f"Redaction of event {event.event_id} failed due to: {ex}" + ) + result["failed_redactions"][event.event_id] = str(ex) + await self._task_scheduler.update_task(task.id, result=result) + + return TaskStatus.COMPLETE, result, None + class ExfiltrationWriter(metaclass=abc.ABCMeta): """Interface used to specify how to write exported data.""" diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
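The scheduled task above is driven by the admin API: one call starts the redaction and returns a `redact_id`, and a status endpoint reports progress, including the per-event `failed_redactions` bookkeeping seen above. A rough usage sketch follows; the endpoint paths are taken from the user admin API docs, and the base URL and token are placeholders.

# Hedged sketch of driving the redaction task over the admin API.
import json
import urllib.request
from typing import Optional

BASE = "https://homeserver.example.com"   # placeholder
ADMIN_TOKEN = "admin-access-token"        # placeholder


def call(method: str, path: str, body: Optional[dict] = None) -> dict:
    req = urllib.request.Request(
        BASE + path,
        data=json.dumps(body).encode("utf-8") if body is not None else None,
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        method=method,
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)


# Start redacting a user's messages/joins in two rooms (limit as above).
resp = call(
    "POST",
    "/_synapse/admin/v1/user/@spammer:example.com/redact",
    {"rooms": ["!a:example.com", "!b:example.com"], "reason": "spam", "limit": 1000},
)

# Poll the task; failed redactions are keyed by event ID.
status = call("GET", f"/_synapse/admin/v1/user/redact_status/{resp['redact_id']}")
print(status)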
index 4b33e1330d..b7d1033351 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py
@@ -896,10 +896,10 @@ class ApplicationServicesHandler: results = await make_deferred_yieldable( defer.DeferredList( [ - run_in_background( + run_in_background( # type: ignore[call-overload] self.appservice_api.claim_client_keys, # We know this must be an app service. - self.store.get_app_service_by_id(service_id), # type: ignore[arg-type] + self.store.get_app_service_by_id(service_id), service_query, ) for service_id, service_query in query_by_appservice.items() @@ -952,10 +952,10 @@ class ApplicationServicesHandler: results = await make_deferred_yieldable( defer.DeferredList( [ - run_in_background( + run_in_background( # type: ignore[call-overload] self.appservice_api.query_keys, # We know this must be an app service. - self.store.get_app_service_by_id(service_id), # type: ignore[arg-type] + self.store.get_app_service_by_id(service_id), service_query, ) for service_id, service_query in query_by_appservice.items() diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index a1fab99f6b..d37324cc46 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py
@@ -79,9 +79,7 @@ from synapse.storage.databases.main.registration import ( from synapse.types import JsonDict, Requester, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable -from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode -from synapse.util.threepids import canonicalise_email if TYPE_CHECKING: from synapse.module_api import ModuleApi @@ -153,42 +151,9 @@ def convert_client_dict_legacy_fields_to_identifier( return identifier -def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: - """ - Convert a phone login identifier type to a generic threepid identifier. - - Args: - identifier: Login identifier dict of type 'm.id.phone' - - Returns: - An equivalent m.id.thirdparty identifier dict - """ - if "country" not in identifier or ( - # The specification requires a "phone" field, while Synapse used to require a "number" - # field. Accept both for backwards compatibility. - "phone" not in identifier - and "number" not in identifier - ): - raise SynapseError( - 400, "Invalid phone-type identifier", errcode=Codes.INVALID_PARAM - ) - - # Accept both "phone" and "number" as valid keys in m.id.phone - phone_number = identifier.get("phone", identifier["number"]) - - # Convert user-provided phone number to a consistent representation - msisdn = phone_number_to_msisdn(identifier["country"], phone_number) - - return { - "type": "m.id.thirdparty", - "medium": "msisdn", - "address": msisdn, - } - - @attr.s(slots=True, auto_attribs=True) class SsoLoginExtraAttributes: - """Data we track about SAML2 sessions""" + """Data we track about SAML2 sessions""" # time the session was created, in milliseconds creation_time: int @@ -1195,70 +1160,11 @@ class AuthHandler: # convert phone type identifiers to generic threepids if identifier_dict["type"] == "m.id.phone": - identifier_dict = login_id_phone_to_thirdparty(identifier_dict) + raise SynapseError(400, "Third party identifiers are not supported on this server.") # convert threepid identifiers to user IDs if identifier_dict["type"] == "m.id.thirdparty": - address = identifier_dict.get("address") - medium = identifier_dict.get("medium") - - if medium is None or address is None: - raise SynapseError(400, "Invalid thirdparty identifier") - - # For emails, canonicalise the address. - # We store all email addresses canonicalised in the DB. - # (See add_threepid in synapse/handlers/auth.py) - if medium == "email": - try: - address = canonicalise_email(address) - except ValueError as e: - raise SynapseError(400, str(e)) - - # We also apply account rate limiting using the 3PID as a key, as - # otherwise using 3PID bypasses the ratelimiting based on user ID. 
- if ratelimit: - await self._failed_login_attempts_ratelimiter.ratelimit( - None, (medium, address), update=False - ) - - # Check for login providers that support 3pid login types - if login_type == LoginType.PASSWORD: - # we've already checked that there is a (valid) password field - assert isinstance(password, str) - ( - canonical_user_id, - callback_3pid, - ) = await self.check_password_provider_3pid(medium, address, password) - if canonical_user_id: - # Authentication through password provider and 3pid succeeded - return canonical_user_id, callback_3pid - - # No password providers were able to handle this 3pid - # Check local store - user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( - medium, address - ) - if not user_id: - logger.warning( - "unknown 3pid identifier medium %s, address %r", medium, address - ) - # We mark that we've failed to log in here, as - # `check_password_provider_3pid` might have returned `None` due - # to an incorrect password, rather than the account not - # existing. - # - # If it returned None but the 3PID was bound then we won't hit - # this code path, which is fine as then the per-user ratelimit - # will kick in below. - if ratelimit: - await self._failed_login_attempts_ratelimiter.can_do_action( - None, (medium, address) - ) - raise LoginError( - 403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN - ) - - identifier_dict = {"type": "m.id.user", "user": user_id} + raise SynapseError(400, "Third party identifiers are not supported on this server.") # by this point, the identifier should be an m.id.user: if it's anything # else, we haven't understood it. @@ -1548,83 +1454,6 @@ class AuthHandler: user_id, (token_id for _, token_id, _ in tokens_and_devices) ) - async def add_threepid( - self, user_id: str, medium: str, address: str, validated_at: int - ) -> None: - """ - Adds an association between a user's Matrix ID and a third-party ID (email, - phone number). - - Args: - user_id: The ID of the user to associate. - medium: The medium of the third-party ID (email, msisdn). - address: The address of the third-party ID (i.e. an email address). - validated_at: The timestamp in ms of when the validation that the user owns - this third-party ID occurred. - """ - # check if medium has a valid value - if medium not in ["email", "msisdn"]: - raise SynapseError( - code=400, - msg=("'%s' is not a valid value for 'medium'" % (medium,)), - errcode=Codes.INVALID_PARAM, - ) - - # 'Canonicalise' email addresses down to lower case. - # We've now moving towards the homeserver being the entity that - # is responsible for validating threepids used for resetting passwords - # on accounts, so in future Synapse will gain knowledge of specific - # types (mediums) of threepid. For now, we still use the existing - # infrastructure, but this is the start of synapse gaining knowledge - # of specific types of threepid (and fixes the fact that checking - # for the presence of an email address during password reset was - # case sensitive). - if medium == "email": - address = canonicalise_email(address) - - await self.store.user_add_threepid( - user_id, medium, address, validated_at, self.hs.get_clock().time_msec() - ) - - # Inform Synapse modules that a 3PID association has been created. - await self._third_party_rules.on_add_user_third_party_identifier( - user_id, medium, address - ) - - # Deprecated method for informing Synapse modules that a 3PID association - # has successfully been created. 
- await self._third_party_rules.on_threepid_bind(user_id, medium, address) - - async def delete_local_threepid( - self, user_id: str, medium: str, address: str - ) -> None: - """Deletes an association between a third-party ID and a user ID from the local - database. This method does not unbind the association from any identity servers. - - If `medium` is 'email' and a pusher is associated with this third-party ID, the - pusher will also be deleted. - - Args: - user_id: ID of user to remove the 3pid from. - medium: The medium of the 3pid being removed: "email" or "msisdn". - address: The 3pid address to remove. - """ - # 'Canonicalise' email addresses as per above - if medium == "email": - address = canonicalise_email(address) - - await self.store.user_delete_threepid(user_id, medium, address) - - # Inform Synapse modules that a 3PID association has been deleted. - await self._third_party_rules.on_remove_user_third_party_identifier( - user_id, medium, address - ) - - if medium == "email": - await self.store.delete_pusher_by_app_id_pushkey_user_id( - app_id="m.email", pushkey=address, user_id=user_id - ) - async def hash(self, password: str) -> str: """Computes a secure hash of password. diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py deleted file mode 100644
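The net effect of the auth.py changes above: password login now only accepts `m.id.user` identifiers, and both `m.id.phone` and `m.id.thirdparty` are rejected outright with a 400. For illustration, `/login` request bodies per the Matrix client-server spec (values are placeholders):

# The first two bodies are now rejected with 400 ("Third party identifiers
# are not supported on this server."); only the last still works.
phone_login = {
    "type": "m.login.password",
    "identifier": {"type": "m.id.phone", "country": "GB", "phone": "07700900000"},
    "password": "hunter2",
}

threepid_login = {
    "type": "m.login.password",
    "identifier": {"type": "m.id.thirdparty", "medium": "email", "address": "a@b.example"},
    "password": "hunter2",
}

user_login = {
    "type": "m.login.password",
    "identifier": {"type": "m.id.user", "user": "alice"},
    "password": "hunter2",
}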
index cc3d641b7d..0000000000 --- a/synapse/handlers/cas.py +++ /dev/null
@@ -1,412 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2020 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# -import logging -import urllib.parse -from typing import TYPE_CHECKING, Dict, List, Optional -from xml.etree import ElementTree as ET - -import attr - -from twisted.web.client import PartialDownloadError - -from synapse.api.errors import HttpResponseException -from synapse.handlers.sso import MappingException, UserAttributes -from synapse.http.site import SynapseRequest -from synapse.types import UserID, map_username_to_mxid_localpart - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class CasError(Exception): - """Used to catch errors when validating the CAS ticket.""" - - def __init__(self, error: str, error_description: Optional[str] = None): - self.error = error - self.error_description = error_description - - def __str__(self) -> str: - if self.error_description: - return f"{self.error}: {self.error_description}" - return self.error - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class CasResponse: - username: str - attributes: Dict[str, List[Optional[str]]] - - -class CasHandler: - """ - Utility class for to handle the response from a CAS SSO service. - - Args: - hs - """ - - def __init__(self, hs: "HomeServer"): - self.hs = hs - self._hostname = hs.hostname - self._store = hs.get_datastores().main - self._auth_handler = hs.get_auth_handler() - self._registration_handler = hs.get_registration_handler() - - self._cas_server_url = hs.config.cas.cas_server_url - self._cas_service_url = hs.config.cas.cas_service_url - self._cas_protocol_version = hs.config.cas.cas_protocol_version - self._cas_displayname_attribute = hs.config.cas.cas_displayname_attribute - self._cas_required_attributes = hs.config.cas.cas_required_attributes - self._cas_enable_registration = hs.config.cas.cas_enable_registration - self._cas_allow_numeric_ids = hs.config.cas.cas_allow_numeric_ids - self._cas_numeric_ids_prefix = hs.config.cas.cas_numeric_ids_prefix - - self._http_client = hs.get_proxied_http_client() - - # identifier for the external_ids table - self.idp_id = "cas" - - # user-facing name of this auth provider - self.idp_name = hs.config.cas.idp_name - - # MXC URI for icon for this auth provider - self.idp_icon = hs.config.cas.idp_icon - - # optional brand identifier for this auth provider - self.idp_brand = hs.config.cas.idp_brand - - self._sso_handler = hs.get_sso_handler() - - self._sso_handler.register_identity_provider(self) - - def _build_service_param(self, args: Dict[str, str]) -> str: - """ - Generates a value to use as the "service" parameter when redirecting or - querying the CAS service. - - Args: - args: Additional arguments to include in the final redirect URL. - - Returns: - The URL to use as a "service" parameter. 
- """ - return "%s?%s" % ( - self._cas_service_url, - urllib.parse.urlencode(args), - ) - - async def _validate_ticket( - self, ticket: str, service_args: Dict[str, str] - ) -> CasResponse: - """ - Validate a CAS ticket with the server, and return the parsed the response. - - Args: - ticket: The CAS ticket from the client. - service_args: Additional arguments to include in the service URL. - Should be the same as those passed to `handle_redirect_request`. - - Raises: - CasError: If there's an error parsing the CAS response. - - Returns: - The parsed CAS response. - """ - if self._cas_protocol_version == 3: - uri = self._cas_server_url + "/p3/proxyValidate" - else: - uri = self._cas_server_url + "/proxyValidate" - args = { - "ticket": ticket, - "service": self._build_service_param(service_args), - } - try: - body = await self._http_client.get_raw(uri, args) - except PartialDownloadError as pde: - # Twisted raises this error if the connection is closed, - # even if that's being used old-http style to signal end-of-data - # Assertion is for mypy's benefit. Error.response is Optional[bytes], - # but a PartialDownloadError should always have a non-None response. - assert pde.response is not None - body = pde.response - except HttpResponseException as e: - description = ( - 'Authorization server responded with a "{status}" error ' - "while exchanging the authorization code." - ).format(status=e.code) - raise CasError("server_error", description) from e - - return self._parse_cas_response(body) - - def _parse_cas_response(self, cas_response_body: bytes) -> CasResponse: - """ - Retrieve the user and other parameters from the CAS response. - - Args: - cas_response_body: The response from the CAS query. - - Raises: - CasError: If there's an error parsing the CAS response. - - Returns: - The parsed CAS response. - """ - - # Ensure the response is valid. - root = ET.fromstring(cas_response_body) - if not root.tag.endswith("serviceResponse"): - raise CasError( - "missing_service_response", - "root of CAS response is not serviceResponse", - ) - - success = root[0].tag.endswith("authenticationSuccess") - if not success: - raise CasError("unsucessful_response", "Unsuccessful CAS response") - - # Iterate through the nodes and pull out the user and any extra attributes. - user = None - attributes: Dict[str, List[Optional[str]]] = {} - for child in root[0]: - if child.tag.endswith("user"): - user = child.text - # if numeric user IDs are allowed and username is numeric then we add the prefix so Synapse can handle it - if self._cas_allow_numeric_ids and user is not None and user.isdigit(): - user = f"{self._cas_numeric_ids_prefix}{user}" - if child.tag.endswith("attributes"): - for attribute in child: - # ElementTree library expands the namespace in - # attribute tags to the full URL of the namespace. - # We don't care about namespace here and it will always - # be encased in curly braces, so we remove them. - tag = attribute.tag - if "}" in tag: - tag = tag.split("}")[1] - attributes.setdefault(tag, []).append(attribute.text) - - # Ensure a user was found. - if user is None: - raise CasError("no_user", "CAS response does not contain user") - - return CasResponse(user, attributes) - - async def handle_redirect_request( - self, - request: SynapseRequest, - client_redirect_url: Optional[bytes], - ui_auth_session_id: Optional[str] = None, - ) -> str: - """Generates a URL for the CAS server where the client should be redirected. 
- - Args: - request: the incoming HTTP request - client_redirect_url: the URL that we should redirect the - client to after login (or None for UI Auth). - ui_auth_session_id: The session ID of the ongoing UI Auth (or - None if this is a login). - - Returns: - URL to redirect to - """ - - if ui_auth_session_id: - service_args = {"session": ui_auth_session_id} - else: - assert client_redirect_url - service_args = {"redirectUrl": client_redirect_url.decode("utf8")} - - args = urllib.parse.urlencode( - {"service": self._build_service_param(service_args)} - ) - - return "%s/login?%s" % (self._cas_server_url, args) - - async def handle_ticket( - self, - request: SynapseRequest, - ticket: str, - client_redirect_url: Optional[str], - session: Optional[str], - ) -> None: - """ - Called once the user has successfully authenticated with the SSO. - Validates a CAS ticket sent by the client and completes the auth process. - - If the user interactive authentication session is provided, marks the - UI Auth session as complete, then returns an HTML page notifying the - user they are done. - - Otherwise, this registers the user if necessary, and then returns a - redirect (with a login token) to the client. - - Args: - request: the incoming request from the browser. We'll - respond to it with a redirect or an HTML page. - - ticket: The CAS ticket provided by the client. - - client_redirect_url: the redirectUrl parameter from the `/cas/ticket` HTTP request, if given. - This should be the same as the redirectUrl from the original `/login/sso/redirect` request. - - session: The session parameter from the `/cas/ticket` HTTP request, if given. - This should be the UI Auth session id. - """ - args = {} - if client_redirect_url: - args["redirectUrl"] = client_redirect_url - if session: - args["session"] = session - - try: - cas_response = await self._validate_ticket(ticket, args) - except CasError as e: - logger.exception("Could not validate ticket") - self._sso_handler.render_error(request, e.error, e.error_description, 401) - return - - await self._handle_cas_response( - request, cas_response, client_redirect_url, session - ) - - async def _handle_cas_response( - self, - request: SynapseRequest, - cas_response: CasResponse, - client_redirect_url: Optional[str], - session: Optional[str], - ) -> None: - """Handle a CAS response to a ticket request. - - Assumes that the response has been validated. Maps the user onto an MXID, - registering them if necessary, and returns a response to the browser. - - Args: - request: the incoming request from the browser. We'll respond to it with an - HTML page or a redirect - - cas_response: The parsed CAS response. - - client_redirect_url: the redirectUrl parameter from the `/cas/ticket` HTTP request, if given. - This should be the same as the redirectUrl from the original `/login/sso/redirect` request. - - session: The session parameter from the `/cas/ticket` HTTP request, if given. - This should be the UI Auth session id. - """ - - # first check if we're doing a UIA - if session: - return await self._sso_handler.complete_sso_ui_auth_request( - self.idp_id, - cas_response.username, - session, - request, - ) - - # otherwise, we're handling a login request. - - # Ensure that the attributes of the logged in user meet the required - # attributes. 
- if not self._sso_handler.check_required_attributes( - request, cas_response.attributes, self._cas_required_attributes - ): - return - - # Call the mapper to register/login the user - - # If this not a UI auth request than there must be a redirect URL. - assert client_redirect_url is not None - - try: - await self._complete_cas_login(cas_response, request, client_redirect_url) - except MappingException as e: - logger.exception("Could not map user") - self._sso_handler.render_error(request, "mapping_error", str(e)) - - async def _complete_cas_login( - self, - cas_response: CasResponse, - request: SynapseRequest, - client_redirect_url: str, - ) -> None: - """ - Given a CAS response, complete the login flow - - Retrieves the remote user ID, registers the user if necessary, and serves - a redirect back to the client with a login-token. - - Args: - cas_response: The parsed CAS response. - request: The request to respond to - client_redirect_url: The redirect URL passed in by the client. - - Raises: - MappingException if there was a problem mapping the response to a user. - RedirectException: some mapping providers may raise this if they need - to redirect to an interstitial page. - """ - # Note that CAS does not support a mapping provider, so the logic is hard-coded. - localpart = map_username_to_mxid_localpart(cas_response.username) - - async def cas_response_to_user_attributes(failures: int) -> UserAttributes: - """ - Map from CAS attributes to user attributes. - """ - # Due to the grandfathering logic matching any previously registered - # mxids it isn't expected for there to be any failures. - if failures: - raise RuntimeError("CAS is not expected to de-duplicate Matrix IDs") - - # Arbitrarily use the first attribute found. - display_name = cas_response.attributes.get( - self._cas_displayname_attribute, [None] - )[0] - - return UserAttributes(localpart=localpart, display_name=display_name) - - async def grandfather_existing_users() -> Optional[str]: - # Since CAS did not always use the user_external_ids table, always - # to attempt to map to existing users. - user_id = UserID(localpart, self._hostname).to_string() - - logger.debug( - "Looking for existing account based on mapped %s", - user_id, - ) - - users = await self._store.get_users_by_id_case_insensitive(user_id) - if users: - registered_user_id = list(users.keys())[0] - logger.info("Grandfathering mapping to %s", registered_user_id) - return registered_user_id - - return None - - await self._sso_handler.complete_sso_login_request( - self.idp_id, - cas_response.username, - request, - client_redirect_url, - cas_response_to_user_attributes, - grandfather_existing_users, - registration_enabled=self._cas_enable_registration, - ) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 12a7cace55..2c4991c6e5 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -43,7 +43,6 @@ class DeactivateAccountHandler: self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() self._room_member_handler = hs.get_room_member_handler() - self._identity_handler = hs.get_identity_handler() self._profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() self._server_name = hs.hostname @@ -82,7 +81,7 @@ class DeactivateAccountHandler: by_admin: Whether this change was made by an administrator. Returns: - True if identity server supports removing threepids, otherwise False. + True """ # This can only be called on the main process. @@ -96,40 +95,6 @@ class DeactivateAccountHandler: 403, "Deactivation of this user is forbidden", Codes.FORBIDDEN ) - # FIXME: Theoretically there is a race here wherein user resets - # password using threepid. - - # delete threepids first. We remove these from the IS so if this fails, - # leave the user still active so they can try again. - # Ideally we would prevent password resets and then do this in the - # background thread. - - # This will be set to false if the identity server doesn't support - # unbinding - identity_server_supports_unbinding = True - - # Attempt to unbind any known bound threepids to this account from identity - # server(s). - bound_threepids = await self.store.user_get_bound_threepids(user_id) - for medium, address in bound_threepids: - try: - result = await self._identity_handler.try_unbind_threepid( - user_id, medium, address, id_server - ) - except Exception: - # Do we want this to be a fatal error or should we carry on? - logger.exception("Failed to remove threepid from ID server") - raise SynapseError(400, "Failed to remove threepid from ID server") - - identity_server_supports_unbinding &= result - - # Remove any local threepid associations for this account. - local_threepids = await self.store.user_get_threepids(user_id) - for local_threepid in local_threepids: - await self._auth_handler.delete_local_threepid( - user_id, local_threepid.medium, local_threepid.address - ) - # delete any devices belonging to the user, which will also # delete corresponding access tokens. await self._device_handler.delete_all_devices_for_user(user_id) @@ -194,7 +159,7 @@ class DeactivateAccountHandler: by_admin, ) - return identity_server_supports_unbinding + return True async def _reject_pending_invites_and_knocks_for_user(self, user_id: str) -> None: """Reject pending invites and knocks addressed to a given user ID. diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py new file mode 100644
index 0000000000..cb2a34ff73
--- /dev/null
+++ b/synapse/handlers/delayed_events.py
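
The new `DelayedEventsHandler` below keeps at most one Twisted timer armed, always pointed at the earliest pending send time (`_schedule_next_at` / `_schedule_next_at_or_none`). A toy, thread-based sketch of that single-timer pattern; `DelayedSender` and its names are invented here, the real handler persists its queue in the `delayed_events` tables rather than a heap, and this sketch is not thread-safe:

```python
import heapq
import threading
import time
from typing import Callable, List, Optional, Tuple

class DelayedSender:
    def __init__(self, send: Callable[[str], None]) -> None:
        self._send = send
        self._heap: List[Tuple[float, str]] = []  # (send_ts_sec, delay_id)
        self._timer: Optional[threading.Timer] = None

    def add(self, delay_id: str, delay_ms: int) -> None:
        heapq.heappush(self._heap, (time.time() + delay_ms / 1000, delay_id))
        self._reschedule()

    def _reschedule(self) -> None:
        # Like _schedule_next_at: one timer, re-armed for the earliest
        # pending send time, clamped so it never fires in the past.
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
        if self._heap:
            delay = max(self._heap[0][0] - time.time(), 0)
            self._timer = threading.Timer(delay, self._on_timeout)
            self._timer.start()

    def _on_timeout(self) -> None:
        # Like _send_on_timeout: flush everything that is due, then re-arm.
        now = time.time()
        while self._heap and self._heap[0][0] <= now:
            self._send(heapq.heappop(self._heap)[1])
        self._reschedule()
```
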
@@ -0,0 +1,545 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import logging +from typing import TYPE_CHECKING, List, Optional, Set, Tuple + +from twisted.internet.interfaces import IDelayedCall + +from synapse.api.constants import EventTypes +from synapse.api.errors import ShadowBanError +from synapse.api.ratelimiting import Ratelimiter +from synapse.config.workers import MAIN_PROCESS_INSTANCE_NAME +from synapse.logging.opentracing import set_tag +from synapse.metrics import event_processing_positions +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.http.delayed_events import ( + ReplicationAddedDelayedEventRestServlet, +) +from synapse.storage.databases.main.delayed_events import ( + DelayedEventDetails, + DelayID, + EventType, + StateKey, + Timestamp, + UserLocalpart, +) +from synapse.storage.databases.main.state_deltas import StateDelta +from synapse.types import ( + JsonDict, + Requester, + RoomID, + UserID, + create_requester, +) +from synapse.util.events import generate_fake_event_id +from synapse.util.metrics import Measure + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class DelayedEventsHandler: + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastores().main + self._storage_controllers = hs.get_storage_controllers() + self._config = hs.config + self._clock = hs.get_clock() + self._event_creation_handler = hs.get_event_creation_handler() + self._room_member_handler = hs.get_room_member_handler() + + self._request_ratelimiter = hs.get_request_ratelimiter() + + # Ratelimiter for management of existing delayed events, + # keyed by the sending user ID & device ID. + self._delayed_event_mgmt_ratelimiter = Ratelimiter( + store=self._store, + clock=self._clock, + cfg=self._config.ratelimiting.rc_delayed_event_mgmt, + ) + + self._next_delayed_event_call: Optional[IDelayedCall] = None + + # The current position in the current_state_delta stream + self._event_pos: Optional[int] = None + + # Guard to ensure we only process event deltas one at a time + self._event_processing = False + + if hs.config.worker.worker_app is None: + self._repl_client = None + + async def _schedule_db_events() -> None: + # We kick this off to pick up outstanding work from before the last restart. + # Block until we're up to date. + await self._unsafe_process_new_event() + hs.get_notifier().add_replication_callback(self.notify_new_event) + # Kick off again (without blocking) to catch any missed notifications + # that may have fired before the callback was added. + self._clock.call_later(0, self.notify_new_event) + + # Delayed events that are already marked as processed on startup might not have been + # sent properly on the last run of the server, so unmark them to send them again. + # Caveat: this will double-send delayed events that successfully persisted, but failed + # to be removed from the DB table of delayed events. + # TODO: To avoid double-sending, scan the timeline to find which of these events were + # already sent. 
To do so, must store delay_ids in sent events to retrieve them later. + await self._store.unprocess_delayed_events() + + events, next_send_ts = await self._store.process_timeout_delayed_events( + self._get_current_ts() + ) + + if next_send_ts: + self._schedule_next_at(next_send_ts) + + # Can send the events in background after having awaited on marking them as processed + run_as_background_process( + "_send_events", + self._send_events, + events, + ) + + self._initialized_from_db = run_as_background_process( + "_schedule_db_events", _schedule_db_events + ) + else: + self._repl_client = ReplicationAddedDelayedEventRestServlet.make_client(hs) + + @property + def _is_master(self) -> bool: + return self._repl_client is None + + def notify_new_event(self) -> None: + """ + Called when there may be more state event deltas to process, + which should cancel pending delayed events for the same state. + """ + if self._event_processing: + return + + self._event_processing = True + + async def process() -> None: + try: + await self._unsafe_process_new_event() + finally: + self._event_processing = False + + run_as_background_process("delayed_events.notify_new_event", process) + + async def _unsafe_process_new_event(self) -> None: + # If self._event_pos is None then means we haven't fetched it from the DB yet + if self._event_pos is None: + self._event_pos = await self._store.get_delayed_events_stream_pos() + room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self._event_pos > room_max_stream_ordering: + # apparently, we've processed more events than exist in the database! + # this can happen if events are removed with history purge or similar. + logger.warning( + "Event stream ordering appears to have gone backwards (%i -> %i): " + "rewinding delayed events processor", + self._event_pos, + room_max_stream_ordering, + ) + self._event_pos = room_max_stream_ordering + + # Loop round handling deltas until we're up to date + while True: + with Measure(self._clock, "delayed_events_delta"): + room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self._event_pos == room_max_stream_ordering: + return + + logger.debug( + "Processing delayed events %s->%s", + self._event_pos, + room_max_stream_ordering, + ) + ( + max_pos, + deltas, + ) = await self._storage_controllers.state.get_current_state_deltas( + self._event_pos, room_max_stream_ordering + ) + + logger.debug( + "Handling %d state deltas for delayed events processing", + len(deltas), + ) + await self._handle_state_deltas(deltas) + + self._event_pos = max_pos + + # Expose current event processing position to prometheus + event_processing_positions.labels("delayed_events").set(max_pos) + + await self._store.update_delayed_events_stream_pos(max_pos) + + async def _handle_state_deltas(self, deltas: List[StateDelta]) -> None: + """ + Process current state deltas to cancel other users' pending delayed events + that target the same state. + """ + for delta in deltas: + if delta.event_id is None: + logger.debug( + "Not handling delta for deleted state: %r %r", + delta.event_type, + delta.state_key, + ) + continue + + logger.debug( + "Handling: %r %r, %s", delta.event_type, delta.state_key, delta.event_id + ) + + event = await self._store.get_event( + delta.event_id, check_room_id=delta.room_id, allow_rejected=True, allow_none=True + ) + + if event is None or event.rejected_reason is not None: + # This event has been rejected, so we don't want to cancel any delayed events for it. 
+ continue + + sender = UserID.from_string(event.sender) + + next_send_ts = await self._store.cancel_delayed_state_events( + room_id=delta.room_id, + event_type=delta.event_type, + state_key=delta.state_key, + not_from_localpart=( + sender.localpart + if sender.domain == self._config.server.server_name + else "" + ), + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at_or_none(next_send_ts) + + async def add( + self, + requester: Requester, + *, + room_id: str, + event_type: str, + state_key: Optional[str], + origin_server_ts: Optional[int], + content: JsonDict, + delay: int, + ) -> str: + """ + Creates a new delayed event and schedules its delivery. + + Args: + requester: The requester of the delayed event, who will be its owner. + room_id: The ID of the room where the event should be sent to. + event_type: The type of event to be sent. + state_key: The state key of the event to be sent, or None if it is not a state event. + origin_server_ts: The custom timestamp to send the event with. + If None, the timestamp will be the actual time when the event is sent. + content: The content of the event to be sent. + delay: How long (in milliseconds) to wait before automatically sending the event. + + Returns: The ID of the added delayed event. + + Raises: + SynapseError: if the delayed event fails validation checks. + """ + # Use standard request limiter for scheduling new delayed events. + # TODO: Instead apply ratelimiting based on the scheduled send time. + # See https://github.com/element-hq/synapse/issues/18021 + await self._request_ratelimiter.ratelimit(requester) + + self._event_creation_handler.validator.validate_builder( + self._event_creation_handler.event_builder_factory.for_room_version( + await self._store.get_room_version(room_id), + { + "type": event_type, + "content": content, + "room_id": room_id, + "sender": str(requester.user), + **({"state_key": state_key} if state_key is not None else {}), + }, + ) + ) + + creation_ts = self._get_current_ts() + + delay_id, next_send_ts = await self._store.add_delayed_event( + user_localpart=requester.user.localpart, + device_id=requester.device_id, + creation_ts=creation_ts, + room_id=room_id, + event_type=event_type, + state_key=state_key, + origin_server_ts=origin_server_ts, + content=content, + delay=delay, + ) + + if self._repl_client is not None: + # NOTE: If this throws, the delayed event will remain in the DB and + # will be picked up once the main worker gets another delayed event. + await self._repl_client( + instance_name=MAIN_PROCESS_INSTANCE_NAME, + next_send_ts=next_send_ts, + ) + elif self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) + + return delay_id + + def on_added(self, next_send_ts: int) -> None: + next_send_ts = Timestamp(next_send_ts) + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) + + async def cancel(self, requester: Requester, delay_id: str) -> None: + """ + Cancels the scheduled delivery of the matching delayed event. + + Args: + requester: The owner of the delayed event to act on. + delay_id: The ID of the delayed event to act on. + + Raises: + NotFoundError: if no matching delayed event could be found. 
+ """ + assert self._is_master + await self._delayed_event_mgmt_ratelimiter.ratelimit( + requester, + (requester.user.to_string(), requester.device_id), + ) + await self._initialized_from_db + + next_send_ts = await self._store.cancel_delayed_event( + delay_id=delay_id, + user_localpart=requester.user.localpart, + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at_or_none(next_send_ts) + + async def restart(self, requester: Requester, delay_id: str) -> None: + """ + Restarts the scheduled delivery of the matching delayed event. + + Args: + requester: The owner of the delayed event to act on. + delay_id: The ID of the delayed event to act on. + + Raises: + NotFoundError: if no matching delayed event could be found. + """ + assert self._is_master + await self._delayed_event_mgmt_ratelimiter.ratelimit( + requester, + (requester.user.to_string(), requester.device_id), + ) + await self._initialized_from_db + + next_send_ts = await self._store.restart_delayed_event( + delay_id=delay_id, + user_localpart=requester.user.localpart, + current_ts=self._get_current_ts(), + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) + + async def send(self, requester: Requester, delay_id: str) -> None: + """ + Immediately sends the matching delayed event, instead of waiting for its scheduled delivery. + + Args: + requester: The owner of the delayed event to act on. + delay_id: The ID of the delayed event to act on. + + Raises: + NotFoundError: if no matching delayed event could be found. + """ + assert self._is_master + # Use standard request limiter for sending delayed events on-demand, + # as an on-demand send is similar to sending a regular event. + await self._request_ratelimiter.ratelimit(requester) + await self._initialized_from_db + + event, next_send_ts = await self._store.process_target_delayed_event( + delay_id=delay_id, + user_localpart=requester.user.localpart, + ) + + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at_or_none(next_send_ts) + + await self._send_event( + DelayedEventDetails( + delay_id=DelayID(delay_id), + user_localpart=UserLocalpart(requester.user.localpart), + room_id=event.room_id, + type=event.type, + state_key=event.state_key, + origin_server_ts=event.origin_server_ts, + content=event.content, + device_id=event.device_id, + ) + ) + + async def _send_on_timeout(self) -> None: + self._next_delayed_event_call = None + + events, next_send_ts = await self._store.process_timeout_delayed_events( + self._get_current_ts() + ) + + if next_send_ts: + self._schedule_next_at(next_send_ts) + + await self._send_events(events) + + async def _send_events(self, events: List[DelayedEventDetails]) -> None: + sent_state: Set[Tuple[RoomID, EventType, StateKey]] = set() + for event in events: + if event.state_key is not None: + state_info = (event.room_id, event.type, event.state_key) + if state_info in sent_state: + continue + else: + state_info = None + try: + # TODO: send in background if message event or non-conflicting state event + await self._send_event(event) + if state_info is not None: + sent_state.add(state_info) + except Exception: + logger.exception("Failed to send delayed event") + + for room_id, event_type, state_key in sent_state: + await self._store.delete_processed_delayed_state_events( + room_id=str(room_id), + event_type=event_type, + state_key=state_key, + ) + + def _schedule_next_at_or_none(self, next_send_ts: Optional[Timestamp]) -> None: + if next_send_ts is not None: + 
self._schedule_next_at(next_send_ts) + elif self._next_delayed_event_call is not None: + self._next_delayed_event_call.cancel() + self._next_delayed_event_call = None + + def _schedule_next_at(self, next_send_ts: Timestamp) -> None: + delay = next_send_ts - self._get_current_ts() + delay_sec = delay / 1000 if delay > 0 else 0 + + if self._next_delayed_event_call is None: + self._next_delayed_event_call = self._clock.call_later( + delay_sec, + run_as_background_process, + "_send_on_timeout", + self._send_on_timeout, + ) + else: + self._next_delayed_event_call.reset(delay_sec) + + async def get_all_for_user(self, requester: Requester) -> List[JsonDict]: + """Return all pending delayed events requested by the given user.""" + await self._delayed_event_mgmt_ratelimiter.ratelimit( + requester, + (requester.user.to_string(), requester.device_id), + ) + return await self._store.get_all_delayed_events_for_user( + requester.user.localpart + ) + + async def _send_event( + self, + event: DelayedEventDetails, + txn_id: Optional[str] = None, + ) -> None: + user_id = UserID(event.user_localpart, self._config.server.server_name) + user_id_str = user_id.to_string() + # Create a new requester from what data is currently available + requester = create_requester( + user_id, + is_guest=await self._store.is_guest(user_id_str), + device_id=event.device_id, + ) + + try: + if event.state_key is not None and event.type == EventTypes.Member: + membership = event.content.get("membership") + assert membership is not None + event_id, _ = await self._room_member_handler.update_membership( + requester, + target=UserID.from_string(event.state_key), + room_id=event.room_id.to_string(), + action=membership, + content=event.content, + origin_server_ts=event.origin_server_ts, + ) + else: + event_dict: JsonDict = { + "type": event.type, + "content": event.content, + "room_id": event.room_id.to_string(), + "sender": user_id_str, + } + + if event.origin_server_ts is not None: + event_dict["origin_server_ts"] = event.origin_server_ts + + if event.state_key is not None: + event_dict["state_key"] = event.state_key + + ( + sent_event, + _, + ) = await self._event_creation_handler.create_and_send_nonmember_event( + requester, + event_dict, + txn_id=txn_id, + ) + event_id = sent_event.event_id + except ShadowBanError: + event_id = generate_fake_event_id() + finally: + # TODO: If this is a temporary error, retry. Otherwise, consider notifying clients of the failure + try: + await self._store.delete_processed_delayed_event( + event.delay_id, event.user_localpart + ) + except Exception: + logger.exception("Failed to delete processed delayed event") + + set_tag("event_id", event_id) + + def _get_current_ts(self) -> Timestamp: + return Timestamp(self._clock.time_msec()) + + def _next_send_ts_changed(self, next_send_ts: Optional[Timestamp]) -> bool: + # The DB alone knows if the next send time changed after adding/modifying + # a delayed event, but if we were to ever miss updating our delayed call's + # firing time, we may miss other updates. So, keep track of changes to the + # the next send time here instead of in the DB. + cached_next_send_ts = ( + int(self._next_delayed_event_call.getTime() * 1000) + if self._next_delayed_event_call is not None + else None + ) + return next_send_ts != cached_next_send_ts diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
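
`_next_send_ts_changed`, at the end of the new handler above, has one unit trap: Twisted's `IDelayedCall.getTime()` reports seconds since the epoch as a float, while the delayed-events storage works in milliseconds. The comparison in isolation:

```python
from typing import Optional

def next_send_ts_changed(next_send_ts: Optional[int],
                         timer_fire_time_sec: Optional[float]) -> bool:
    # Convert the timer's fire time (seconds, float) to integer milliseconds
    # before comparing; None on both sides means "no timer, nothing pending".
    cached_ms = (int(timer_fire_time_sec * 1000)
                 if timer_fire_time_sec is not None else None)
    return next_send_ts != cached_ms

assert next_send_ts_changed(5_000, 5.0) is False  # same instant, same units
assert next_send_ts_changed(6_000, 5.0) is True   # timer must be re-armed
assert next_send_ts_changed(None, None) is False  # nothing scheduled either way
```
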
index 4fc6fcd7ae..f8b547bbed 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
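
The rewritten `get_user_ids_changed` in the hunks below derives newly joined and newly left rooms (and users) from current-state membership deltas instead of forward extremities. The add/discard pairing is what makes a join -> leave -> join sequence collapse to "newly joined"; a self-contained sketch of just that bookkeeping:

```python
from typing import Iterable, Optional, Set, Tuple

JOIN = "join"

def track_joinedness(
    changes: Iterable[Tuple[str, Optional[str], str]],  # (room_id, prev, new)
) -> Tuple[Set[str], Set[str]]:
    newly_joined: Set[str] = set()
    newly_left: Set[str] = set()
    for room_id, prev, new in changes:
        # Only transitions into or out of JOIN count as a change in
        # "joinedness"; each transition also undoes the opposite set.
        if new == JOIN:
            if prev != JOIN:
                newly_joined.add(room_id)
                newly_left.discard(room_id)
        elif prev == JOIN:
            newly_joined.discard(room_id)
            newly_left.add(room_id)
    return newly_joined, newly_left

joined, left = track_joinedness(
    [("!r:x", None, "join"), ("!r:x", "join", "leave"), ("!r:x", "leave", "join")]
)
assert joined == {"!r:x"} and left == set()
```
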
@@ -20,10 +20,21 @@ # # import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Set, Tuple +from threading import Lock +from typing import ( + TYPE_CHECKING, + AbstractSet, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, +) from synapse.api import errors -from synapse.api.constants import EduTypes, EventTypes +from synapse.api.constants import EduTypes, EventTypes, Membership from synapse.api.errors import ( Codes, FederationDeniedError, @@ -38,6 +49,8 @@ from synapse.metrics.background_process_metrics import ( wrap_as_background_process, ) from synapse.storage.databases.main.client_ips import DeviceLastConnectionInfo +from synapse.storage.databases.main.roommember import EventIdMembership +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import ( DeviceListUpdates, JsonDict, @@ -151,6 +164,8 @@ class DeviceWorkerHandler: raise errors.NotFoundError() ips = await self.store.get_last_client_ip_by_device(user_id, device_id) + + device = dict(device) _update_device_from_client_ips(device, ips) set_tag("device", str(device)) @@ -211,7 +226,6 @@ class DeviceWorkerHandler: return changed @trace - @measure_func("device.get_user_ids_changed") @cancellable async def get_user_ids_changed( self, user_id: str, from_token: StreamToken @@ -222,129 +236,113 @@ class DeviceWorkerHandler: set_tag("user_id", user_id) set_tag("from_token", str(from_token)) - now_room_key = self.store.get_room_max_token() - - room_ids = await self.store.get_rooms_for_user(user_id) - changed = await self.get_device_changes_in_shared_rooms( - user_id, room_ids, from_token - ) + now_token = self._event_sources.get_current_token() - # Then work out if any users have since joined - rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key) + # We need to work out all the different membership changes for the user + # and user they share a room with, to pass to + # `generate_sync_entry_for_device_list`. See its docstring for details + # on the data required. - member_events = await self.store.get_membership_changes_for_user( - user_id, from_token.room_key, now_room_key - ) - rooms_changed.update(event.room_id for event in member_events) - - stream_ordering = from_token.room_key.stream - - possibly_changed = set(changed) - possibly_left = set() - for room_id in rooms_changed: - # Check if the forward extremities have changed. If not then we know - # the current state won't have changed, and so we can skip this room. - try: - if not await self.store.have_room_forward_extremities_changed_since( - room_id, stream_ordering - ): - continue - except errors.StoreError: - pass + joined_room_ids = await self.store.get_rooms_for_user(user_id) - current_state_ids = await self._state_storage.get_current_state_ids( - room_id, await_full_state=False + # Get the set of rooms that the user has joined/left + membership_changes = ( + await self.store.get_current_state_delta_membership_changes_for_user( + user_id, from_key=from_token.room_key, to_key=now_token.room_key ) + ) - # The user may have left the room - # TODO: Check if they actually did or if we were just invited. - if room_id not in room_ids: - for etype, state_key in current_state_ids.keys(): - if etype != EventTypes.Member: - continue - possibly_left.add(state_key) - continue - - # Fetch the current state at the time. 
- try: - event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering( - room_id, stream_ordering=stream_ordering - ) - except errors.StoreError: - # we have purged the stream_ordering index since the stream - # ordering: treat it the same as a new room - event_ids = [] - - # special-case for an empty prev state: include all members - # in the changed list - if not event_ids: - log_kv( - {"event": "encountered empty previous state", "room_id": room_id} - ) - for etype, state_key in current_state_ids.keys(): - if etype != EventTypes.Member: - continue - possibly_changed.add(state_key) - continue - - current_member_id = current_state_ids.get((EventTypes.Member, user_id)) - if not current_member_id: + # Check for newly joined or left rooms. We need to make sure that we add + # to newly joined in the case membership goes from join -> leave -> join + # again. + newly_joined_rooms: Set[str] = set() + newly_left_rooms: Set[str] = set() + for change in membership_changes: + # We check for changes in "joinedness", i.e. if the membership has + # changed to or from JOIN. + if change.membership == Membership.JOIN: + if change.prev_membership != Membership.JOIN: + newly_joined_rooms.add(change.room_id) + newly_left_rooms.discard(change.room_id) + elif change.prev_membership == Membership.JOIN: + newly_joined_rooms.discard(change.room_id) + newly_left_rooms.add(change.room_id) + + # We now work out if any other users have since joined or left the rooms + # the user is currently in. + + # List of membership changes per room + room_to_deltas: Dict[str, List[StateDelta]] = {} + # The set of event IDs of membership events (so we can fetch their + # associated membership). + memberships_to_fetch: Set[str] = set() + + # TODO: Only pull out membership events? + state_changes = await self.store.get_current_state_deltas_for_rooms( + joined_room_ids, from_token=from_token.room_key, to_token=now_token.room_key + ) + for delta in state_changes: + if delta.event_type != EventTypes.Member: continue - # mapping from event_id -> state_dict - prev_state_ids = await self._state_storage.get_state_ids_for_events( - event_ids, - await_full_state=False, + room_to_deltas.setdefault(delta.room_id, []).append(delta) + if delta.event_id: + memberships_to_fetch.add(delta.event_id) + if delta.prev_event_id: + memberships_to_fetch.add(delta.prev_event_id) + + # Fetch all the memberships for the membership events + event_id_to_memberships: Mapping[str, Optional[EventIdMembership]] = {} + if memberships_to_fetch: + event_id_to_memberships = await self.store.get_membership_from_event_ids( + memberships_to_fetch ) - # Check if we've joined the room? If so we just blindly add all the users to - # the "possibly changed" users. - for state_dict in prev_state_ids.values(): - member_event = state_dict.get((EventTypes.Member, user_id), None) - if not member_event or member_event != current_member_id: - for etype, state_key in current_state_ids.keys(): - if etype != EventTypes.Member: - continue - possibly_changed.add(state_key) - break - - # If there has been any change in membership, include them in the - # possibly changed list. We'll check if they are joined below, - # and we're not toooo worried about spuriously adding users. - for key, event_id in current_state_ids.items(): - etype, state_key = key - if etype != EventTypes.Member: - continue - - # check if this member has changed since any of the extremities - # at the stream_ordering, and add them to the list if so. 
- for state_dict in prev_state_ids.values(): - prev_event_id = state_dict.get(key, None) - if not prev_event_id or prev_event_id != event_id: - if state_key != user_id: - possibly_changed.add(state_key) - break - - if possibly_changed or possibly_left: - possibly_joined = possibly_changed - possibly_left = possibly_changed | possibly_left - - # Double check if we still share rooms with the given user. - users_rooms = await self.store.get_rooms_for_users(possibly_left) - for changed_user_id, entries in users_rooms.items(): - if any(rid in room_ids for rid in entries): - possibly_left.discard(changed_user_id) - else: - possibly_joined.discard(changed_user_id) - - else: - possibly_joined = set() - possibly_left = set() + joined_invited_knocked = ( + Membership.JOIN, + Membership.INVITE, + Membership.KNOCK, + ) - device_list_updates = DeviceListUpdates( - changed=possibly_joined, - left=possibly_left, + # We now want to find any user that have newly joined/invited/knocked, + # or newly left, similarly to above. + newly_joined_or_invited_or_knocked_users: Set[str] = set() + newly_left_users: Set[str] = set() + for _, deltas in room_to_deltas.items(): + for delta in deltas: + # Get the prev/new memberships for the delta + new_membership = None + prev_membership = None + if delta.event_id: + m = event_id_to_memberships.get(delta.event_id) + if m is not None: + new_membership = m.membership + if delta.prev_event_id: + m = event_id_to_memberships.get(delta.prev_event_id) + if m is not None: + prev_membership = m.membership + + # Check if a user has newly joined/invited/knocked, or left. + if new_membership in joined_invited_knocked: + if prev_membership not in joined_invited_knocked: + newly_joined_or_invited_or_knocked_users.add(delta.state_key) + newly_left_users.discard(delta.state_key) + elif prev_membership in joined_invited_knocked: + newly_joined_or_invited_or_knocked_users.discard(delta.state_key) + newly_left_users.add(delta.state_key) + + # Now we actually calculate the device list entry with the information + # calculated above. + device_list_updates = await self.generate_sync_entry_for_device_list( + user_id=user_id, + since_token=from_token, + now_token=now_token, + joined_room_ids=joined_room_ids, + newly_joined_rooms=newly_joined_rooms, + newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, + newly_left_rooms=newly_left_rooms, + newly_left_users=newly_left_users, ) log_kv( @@ -356,6 +354,87 @@ class DeviceWorkerHandler: return device_list_updates + async def generate_sync_entry_for_device_list( + self, + user_id: str, + since_token: StreamToken, + now_token: StreamToken, + joined_room_ids: AbstractSet[str], + newly_joined_rooms: AbstractSet[str], + newly_joined_or_invited_or_knocked_users: AbstractSet[str], + newly_left_rooms: AbstractSet[str], + newly_left_users: AbstractSet[str], + ) -> DeviceListUpdates: + """Generate the DeviceListUpdates section of sync + + Args: + sync_result_builder + newly_joined_rooms: Set of rooms user has joined since previous sync + newly_joined_or_invited_or_knocked_users: Set of users that have joined, + been invited to a room or are knocking on a room since + previous sync. + newly_left_rooms: Set of rooms user has left since previous sync + newly_left_users: Set of users that have left a room we're in since + previous sync + """ + # Take a copy since these fields will be mutated later. 
+ newly_joined_or_invited_or_knocked_users = set( + newly_joined_or_invited_or_knocked_users + ) + newly_left_users = set(newly_left_users) + + # We want to figure out what user IDs the client should refetch + # device keys for, and which users we aren't going to track changes + # for anymore. + # + # For the first step we check: + # a. if any users we share a room with have updated their devices, + # and + # b. we also check if we've joined any new rooms, or if a user has + # joined a room we're in. + # + # For the second step we just find any users we no longer share a + # room with by looking at all users that have left a room plus users + # that were in a room we've left. + + users_that_have_changed = set() + + # Step 1a, check for changes in devices of users we share a room + # with + users_that_have_changed = await self.get_device_changes_in_shared_rooms( + user_id, + joined_room_ids, + from_token=since_token, + now_token=now_token, + ) + + # Step 1b, check for newly joined rooms + for room_id in newly_joined_rooms: + joined_users = await self.store.get_users_in_room(room_id) + newly_joined_or_invited_or_knocked_users.update(joined_users) + + # TODO: Check that these users are actually new, i.e. either they + # weren't in the previous sync *or* they left and rejoined. + users_that_have_changed.update(newly_joined_or_invited_or_knocked_users) + + user_signatures_changed = await self.store.get_users_whose_signatures_changed( + user_id, since_token.device_list_key + ) + users_that_have_changed.update(user_signatures_changed) + + # Now find users that we no longer track + for room_id in newly_left_rooms: + left_users = await self.store.get_users_in_room(room_id) + newly_left_users.update(left_users) + + # Remove any users that we still share a room with. + left_users_rooms = await self.store.get_rooms_for_users(newly_left_users) + for user_id, entries in left_users_rooms.items(): + if any(rid in joined_room_ids for rid in entries): + newly_left_users.discard(user_id) + + return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users) + async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: if not self.hs.is_mine(UserID.from_string(user_id)): raise SynapseError(400, "User is not hosted on this homeserver") @@ -653,6 +732,40 @@ class DeviceHandler(DeviceWorkerHandler): await self.notify_device_update(user_id, device_ids) + async def upsert_device( + self, user_id: str, device_id: str, display_name: Optional[str] = None + ) -> bool: + """Create or update a device + + Args: + user_id: The user to update devices of. + device_id: The device to update. + display_name: The new display name for this device. + + Returns: + True if the device was created, False if it was updated. + + """ + + # Reject a new displayname which is too long. + self._check_device_name_length(display_name) + + created = await self.store.store_device( + user_id, + device_id, + initial_device_display_name=display_name, + ) + + if not created: + await self.store.update_device( + user_id, + device_id, + new_display_name=display_name, + ) + + await self.notify_device_update(user_id, [device_id]) + return created + async def update_device(self, user_id: str, device_id: str, content: dict) -> None: """Update the given device @@ -1125,7 +1238,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): ) # Attempt to resync out of sync device lists every 30s. 
- self._resync_retry_in_progress = False + self._resync_retry_lock = Lock() self.clock.looping_call( run_as_background_process, 30 * 1000, @@ -1307,13 +1420,10 @@ class DeviceListUpdater(DeviceListWorkerUpdater): """Retry to resync device lists that are out of sync, except if another retry is in progress. """ - if self._resync_retry_in_progress: + # If the lock can not be acquired we want to always return immediately instead of blocking here + if not self._resync_retry_lock.acquire(blocking=False): return - try: - # Prevent another call of this function to retry resyncing device lists so - # we don't send too many requests. - self._resync_retry_in_progress = True # Get all of the users that need resyncing. need_resync = await self.store.get_user_ids_requiring_device_list_resync() @@ -1354,8 +1464,7 @@ class DeviceListUpdater(DeviceListWorkerUpdater): e, ) finally: - # Allow future calls to retry resyncinc out of sync device lists. - self._resync_retry_in_progress = False + self._resync_retry_lock.release() async def multi_user_device_resync( self, user_ids: List[str], mark_failed_as_stale: bool = True diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
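
The last hunk above swaps the `_resync_retry_in_progress` boolean for a `threading.Lock` acquired with `blocking=False`: the flag's separate check and set could race, while the lock's test-and-set is atomic, and the `finally` guarantees release even if the resync throws. The same guard shape in isolation:

```python
import threading

_retry_lock = threading.Lock()

def retry_resync() -> None:
    # If another retry pass already holds the lock, return immediately
    # rather than blocking behind it, mirroring acquire(blocking=False).
    if not _retry_lock.acquire(blocking=False):
        return
    try:
        pass  # ... retry resyncing out-of-sync device lists ...
    finally:
        _retry_lock.release()
```
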
index ad2b0f5fcc..48c7d411d5 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -21,9 +21,7 @@ import logging import string -from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence - -from typing_extensions import Literal +from typing import TYPE_CHECKING, Iterable, List, Literal, Optional, Sequence from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( @@ -265,9 +263,9 @@ class DirectoryHandler: async def get_association(self, room_alias: RoomAlias) -> JsonDict: room_id = None if self.hs.is_mine(room_alias): - result: Optional[RoomAliasMapping] = ( - await self.get_association_from_room_alias(room_alias) - ) + result: Optional[ + RoomAliasMapping + ] = await self.get_association_from_room_alias(room_alias) if result: room_id = result.room_id @@ -512,11 +510,9 @@ class DirectoryHandler: raise SynapseError(403, "Not allowed to publish room") # Check if publishing is blocked by a third party module - allowed_by_third_party_rules = ( - await ( - self._third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility - ) + allowed_by_third_party_rules = await ( + self._third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility ) ) if not allowed_by_third_party_rules: diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index f78e66ad0a..6171aaf29f 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
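
The first hunk below adds `filter_device_key_query`: invalid user IDs are always dropped (indistinguishable from a user who exists but has no keys), and when the experimental MSC4263 flag is on, so are users the requester shares no room with. A simplified stand-in; the `valid()` check here is a toy for `UserID.is_valid`:

```python
from typing import Dict, List, Set

def filter_key_query(
    query: Dict[str, List[str]],
    shares_room_with: Set[str],
    limit_to_shared_rooms: bool,
) -> Dict[str, List[str]]:
    def valid(user_id: str) -> bool:
        return user_id.startswith("@") and ":" in user_id  # toy validity check

    allowed = {u for u in query if valid(u)}
    if limit_to_shared_rooms:
        # MSC4263 behaviour: only answer for users we share a room with.
        allowed &= shares_room_with
    return {u: keys for u, keys in query.items() if u in allowed}

q = {"@a:x": [], "@b:x": [], "not-a-user-id": []}
assert filter_key_query(q, {"@a:x"}, True) == {"@a:x": []}
assert filter_key_query(q, {"@a:x"}, False) == {"@a:x": [], "@b:x": []}
```
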
@@ -39,6 +39,8 @@ from synapse.replication.http.devices import ReplicationUploadKeysForUserRestSer from synapse.types import ( JsonDict, JsonMapping, + ScheduledTask, + TaskStatus, UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -70,6 +72,7 @@ class E2eKeysHandler: self.is_mine = hs.is_mine self.clock = hs.get_clock() self._worker_lock_handler = hs.get_worker_locks_handler() + self._task_scheduler = hs.get_task_scheduler() federation_registry = hs.get_federation_registry() @@ -116,6 +119,10 @@ class E2eKeysHandler: hs.config.experimental.msc3984_appservice_key_query ) + self._task_scheduler.register_action( + self._delete_old_one_time_keys_task, "delete_old_otks" + ) + @trace @cancellable async def query_devices( @@ -151,7 +158,37 @@ class E2eKeysHandler: the number of in-flight queries at a time. """ async with self._query_devices_linearizer.queue((from_user_id, from_device_id)): - device_keys_query: Dict[str, List[str]] = query_body.get("device_keys", {}) + + async def filter_device_key_query( + query: Dict[str, List[str]], + ) -> Dict[str, List[str]]: + if not self.config.experimental.msc4263_limit_key_queries_to_users_who_share_rooms: + # Only ignore invalid user IDs, which is the same behaviour as if + # the user existed but had no keys. + return { + user_id: v + for user_id, v in query.items() + if UserID.is_valid(user_id) + } + + # Strip invalid user IDs and user IDs the requesting user does not share rooms with. + valid_user_ids = [ + user_id for user_id in query.keys() if UserID.is_valid(user_id) + ] + allowed_user_ids = set( + await self.store.do_users_share_a_room_joined_or_invited( + from_user_id, valid_user_ids + ) + ) + return { + user_id: v + for user_id, v in query.items() + if user_id in allowed_user_ids + } + + device_keys_query: Dict[str, List[str]] = await filter_device_key_query( + query_body.get("device_keys", {}) + ) # separate users by domain. # make a map from domain to user_id to device_ids @@ -159,11 +196,6 @@ class E2eKeysHandler: remote_queries = {} for user_id, device_ids in device_keys_query.items(): - if not UserID.is_valid(user_id): - # Ignore invalid user IDs, which is the same behaviour as if - # the user existed but had no keys. - continue - # we use UserID.from_string to catch invalid user ids if self.is_mine(UserID.from_string(user_id)): local_query[user_id] = device_ids @@ -615,7 +647,7 @@ class E2eKeysHandler: 3. Attempt to fetch fallback keys from the database. Args: - local_query: An iterable of tuples of (user ID, device ID, algorithm). + local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys). always_include_fallback_keys: True to always include fallback keys. 
Returns: @@ -1156,7 +1188,7 @@ class E2eKeysHandler: devices = devices[user_id] except SynapseError as e: failure = _exception_to_failure(e) - failures[user_id] = {device: failure for device in signatures.keys()} + failures[user_id] = dict.fromkeys(signatures.keys(), failure) return signature_list, failures for device_id, device in signatures.items(): @@ -1296,7 +1328,7 @@ class E2eKeysHandler: except SynapseError as e: failure = _exception_to_failure(e) for user, devicemap in signatures.items(): - failures[user] = {device_id: failure for device_id in devicemap.keys()} + failures[user] = dict.fromkeys(devicemap.keys(), failure) return signature_list, failures for target_user, devicemap in signatures.items(): @@ -1337,9 +1369,7 @@ class E2eKeysHandler: # other devices were signed -- mark those as failures logger.debug("upload signature: too many devices specified") failure = _exception_to_failure(NotFoundError("Unknown device")) - failures[target_user] = { - device: failure for device in other_devices - } + failures[target_user] = dict.fromkeys(other_devices, failure) if user_signing_key_id in master_key.get("signatures", {}).get( user_id, {} @@ -1360,9 +1390,7 @@ class E2eKeysHandler: except SynapseError as e: failure = _exception_to_failure(e) if device_id is None: - failures[target_user] = { - device_id: failure for device_id in devicemap.keys() - } + failures[target_user] = dict.fromkeys(devicemap.keys(), failure) else: failures.setdefault(target_user, {})[device_id] = failure @@ -1574,6 +1602,45 @@ class E2eKeysHandler: return True return False + async def _delete_old_one_time_keys_task( + self, task: ScheduledTask + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """Scheduler task to delete old one time keys. + + Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility + that it could still have old OTKs that the client has dropped. This task is scheduled exactly once + by a database schema delta file, and it clears out old one-time-keys that look like they came from libolm. + """ + last_user = task.result.get("from_user", "") if task.result else "" + while True: + # We process users in batches of 100 + users, rowcount = await self.store.delete_old_otks_for_next_user_batch( + last_user, 100 + ) + if len(users) == 0: + # We're done! + return TaskStatus.COMPLETE, None, None + + logger.debug( + "Deleted %i old one-time-keys for users '%s'..'%s'", + rowcount, + users[0], + users[-1], + ) + last_user = users[-1] + + # Store our progress + await self._task_scheduler.update_task( + task.id, result={"from_user": last_user} + ) + + # Sleep a little before doing the next user. + # + # matrix.org has about 15M users in the e2e_one_time_keys_json table + # (comprising 20M devices). We want this to take about a week, so we need + # to do about one batch of 100 users every 4 seconds. + await self.clock.sleep(4) + def _check_cross_signing_key( key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
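
`_delete_old_one_time_keys_task`, added above, is a resumable scheduled task: it walks users in batches of 100, checkpoints a `from_user` cursor into the task result after every batch, and sleeps so the sweep spreads over roughly a week. Its control flow reduced to a skeleton; both callback parameters are placeholders for the store and task-scheduler calls:

```python
import asyncio
from typing import Awaitable, Callable, List, Tuple

async def drain_in_batches(
    next_batch: Callable[[str, int], Awaitable[Tuple[List[str], int]]],
    checkpoint: Callable[[str], Awaitable[None]],
) -> None:
    last_user = ""  # resume cursor; restored from the task result on restart
    while True:
        users, _deleted = await next_batch(last_user, 100)
        if not users:
            return  # no users past the cursor: the task is COMPLETE
        last_user = users[-1]
        await checkpoint(last_user)  # persist progress before sleeping
        # ~15M users / 100 per batch, one batch every 4s: about one week.
        await asyncio.sleep(4)
```
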
index f397911f28..623fd33f13 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -20,9 +20,7 @@ # import logging -from typing import TYPE_CHECKING, Dict, Optional, cast - -from typing_extensions import Literal +from typing import TYPE_CHECKING, Dict, Literal, Optional, cast from synapse.api.errors import ( Codes, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 299588e476..ff751d25f6 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
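
`maybe_backfill` and `try_backfill` below now return `None` instead of a success flag, but the retry loop's structure survives: walk the likely domains, count servers that actively denied the request, and give up once too many have refused while skating past transient failures. The loop's shape, with exception types simplified and the threshold a placeholder (the real limit is not shown in this excerpt):

```python
from typing import Callable, Iterable

def backfill_from(
    domains: Iterable[str],
    fetch: Callable[[str], None],
    max_denied_count: int = 5,  # placeholder value
) -> None:
    denied_count = 0
    for dom in domains:
        try:
            fetch(dom)
            return  # success: we probably have the events we need now
        except PermissionError:  # stands in for a 403 from the remote
            denied_count += 1
            if denied_count >= max_denied_count:
                return
        except Exception:
            continue  # transient failure: try the next domain
```
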
@@ -78,6 +78,7 @@ from synapse.replication.http.federation import ( ReplicationStoreRoomOnOutlierMembershipRestServlet, ) from synapse.storage.databases.main.events_worker import EventRedactBehaviour +from synapse.storage.invite_rule import InviteRule from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer @@ -210,7 +211,7 @@ class FederationHandler: @tag_args async def maybe_backfill( self, room_id: str, current_depth: int, limit: int, record_time: bool = True - ) -> bool: + ) -> None: """Checks the database to see if we should backfill before paginating, and if so do. @@ -224,8 +225,6 @@ class FederationHandler: should back paginate. record_time: Whether to record the time it takes to backfill. - Returns: - True if we actually tried to backfill something, otherwise False. """ # Starting the processing time here so we can include the room backfill # linearizer lock queue in the timing @@ -251,7 +250,7 @@ class FederationHandler: limit: int, *, processing_start_time: Optional[int], - ) -> bool: + ) -> None: """ Checks whether the `current_depth` is at or approaching any backfill points in the room and if so, will backfill. We only care about @@ -325,7 +324,7 @@ class FederationHandler: limit=1, ) if not have_later_backfill_points: - return False + return None logger.debug( "_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points." @@ -345,15 +344,15 @@ class FederationHandler: ) # We return `False` because we're backfilling in the background and there is # no new events immediately for the caller to know about yet. - return False + return None # Even after recursing with `MAX_DEPTH`, we didn't find any # backward extremities to backfill from. if not sorted_backfill_points: logger.debug( - "_maybe_backfill_inner: Not backfilling as no backward extremeties found." + "_maybe_backfill_inner: Not backfilling as no backward extremities found." ) - return False + return None # If we're approaching an extremity we trigger a backfill, otherwise we # no-op. @@ -372,7 +371,7 @@ class FederationHandler: current_depth, limit, ) - return False + return None # For performance's sake, we only want to paginate from a particular extremity # if we can actually see the events we'll get. Otherwise, we'd just spend a lot @@ -440,7 +439,7 @@ class FederationHandler: logger.debug( "_maybe_backfill_inner: found no extremities which would be visible" ) - return False + return None logger.debug( "_maybe_backfill_inner: extremities_to_request %s", extremities_to_request @@ -463,7 +462,7 @@ class FederationHandler: ) ) - async def try_backfill(domains: StrCollection) -> bool: + async def try_backfill(domains: StrCollection) -> None: # TODO: Should we try multiple of these at a time? # Number of contacted remote homeservers that have denied our backfill @@ -486,7 +485,7 @@ class FederationHandler: # If this succeeded then we probably already have the # appropriate stuff. # TODO: We can probably do something more intelligent here. 
- return True + return None except NotRetryingDestination as e: logger.info("_maybe_backfill_inner: %s", e) continue @@ -510,7 +509,7 @@ class FederationHandler: ) denied_count += 1 if denied_count >= max_denied_count: - return False + return None continue logger.info("Failed to backfill from %s because %s", dom, e) @@ -526,7 +525,7 @@ class FederationHandler: ) denied_count += 1 if denied_count >= max_denied_count: - return False + return None continue logger.info("Failed to backfill from %s because %s", dom, e) @@ -538,7 +537,7 @@ class FederationHandler: logger.exception("Failed to backfill from %s because %s", dom, e) continue - return False + return None # If we have the `processing_start_time`, then we can make an # observation. We wouldn't have the `processing_start_time` in the case @@ -550,14 +549,9 @@ class FederationHandler: (processing_end_time - processing_start_time) / 1000 ) - success = await try_backfill(likely_domains) - if success: - return True - # TODO: we could also try servers which were previously in the room, but # are no longer. - - return False + return await try_backfill(likely_domains) async def send_invite(self, target_host: str, event: EventBase) -> EventBase: """Sends the invite to the remote server for signing. @@ -880,6 +874,9 @@ class FederationHandler: if stripped_room_state is None: raise KeyError("Missing 'knock_room_state' field in send_knock response") + if not isinstance(stripped_room_state, list): + raise TypeError("'knock_room_state' has wrong type") + event.unsigned["knock_room_state"] = stripped_room_state context = EventContext.for_outlier(self._storage_controllers) @@ -1001,11 +998,11 @@ class FederationHandler: ) if include_auth_user_id: - event_content[EventContentFields.AUTHORISING_USER] = ( - await self._event_auth_handler.get_user_which_could_invite( - room_id, - state_ids, - ) + event_content[ + EventContentFields.AUTHORISING_USER + ] = await self._event_auth_handler.get_user_which_could_invite( + room_id, + state_ids, ) builder = self.event_builder_factory.for_room_version( @@ -1086,6 +1083,20 @@ class FederationHandler: if event.state_key == self._server_notices_mxid: raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") + # check the invitee's configuration and apply rules + invite_config = await self.store.get_invite_config_for_user(event.state_key) + rule = invite_config.get_invite_rule(event.sender) + if rule == InviteRule.BLOCK: + logger.info( + f"Automatically rejecting invite from {event.sender} due to the invite filtering rules of {event.state_key}" + ) + raise SynapseError( + 403, + "You are not permitted to invite this user.", + errcode=Codes.INVITE_BLOCKED, + ) + # InviteRule.IGNORE is handled at the sync layer + # We retrieve the room member handler here as to not cause a cyclic dependency member_handler = self.hs.get_room_member_handler() # We don't rate limit based on room ID, as that should be done by @@ -1309,9 +1320,9 @@ class FederationHandler: if state_key is not None: # the event was not rejected (get_event raises a NotFoundError for rejected # events) so the state at the event should include the event itself. - assert ( - state_map.get((event.type, state_key)) == event.event_id - ), "State at event did not include event itself" + assert state_map.get((event.type, state_key)) == event.event_id, ( + "State at event did not include event itself" + ) # ... 
but we need the state *before* that event if "replaces_state" in event.unsigned: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
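
The invite-handling hunk above consults the invitee's invite-filtering configuration before accepting a federation invite: `InviteRule.BLOCK` rejects with a 403 and `Codes.INVITE_BLOCKED`, while `IGNORE` is deliberately left to the sync layer. A reduced model of the lookup; the real `get_invite_rule` may support pattern matching rather than the exact-sender dictionary used here:

```python
from enum import Enum
from typing import Dict

class InviteRule(Enum):
    ALLOW = "allow"
    BLOCK = "block"
    IGNORE = "ignore"

def rule_for_invite(config: Dict[str, InviteRule], sender: str) -> InviteRule:
    # Exact-match stand-in for invite_config.get_invite_rule(event.sender).
    return config.get(sender, InviteRule.ALLOW)

config = {"@spammer:evil.example": InviteRule.BLOCK}
assert rule_for_invite(config, "@spammer:evil.example") is InviteRule.BLOCK
assert rule_for_invite(config, "@friend:ok.example") is InviteRule.ALLOW
```
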
index c85deaed56..1e738f484f 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -151,6 +151,8 @@ class FederationEventHandler: def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() self._store = hs.get_datastores().main + self._state_store = hs.get_datastores().state + self._state_deletion_store = hs.get_datastores().state_deletion self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -580,7 +582,9 @@ class FederationEventHandler: room_version.identifier, state_maps_to_resolve, event_map=None, - state_res_store=StateResolutionStore(self._store), + state_res_store=StateResolutionStore( + self._store, self._state_deletion_store + ), ) ) else: @@ -1179,7 +1183,9 @@ class FederationEventHandler: room_version, state_maps, event_map={event_id: event}, - state_res_store=StateResolutionStore(self._store), + state_res_store=StateResolutionStore( + self._store, self._state_deletion_store + ), ) except Exception as e: @@ -1874,7 +1880,9 @@ class FederationEventHandler: room_version, [local_state_id_map, claimed_auth_events_id_map], event_map=None, - state_res_store=StateResolutionStore(self._store), + state_res_store=StateResolutionStore( + self._store, self._state_deletion_store + ), ) ) else: @@ -2014,7 +2022,9 @@ class FederationEventHandler: room_version, state_sets, event_map=None, - state_res_store=StateResolutionStore(self._store), + state_res_store=StateResolutionStore( + self._store, self._state_deletion_store + ), ) ) else: @@ -2272,8 +2282,9 @@ class FederationEventHandler: event_and_contexts, backfilled=backfilled ) - # After persistence we always need to notify replication there may - # be new data. + # After persistence, we never notify clients (wake up `/sync` streams) about + # backfilled events but it's important to let all the workers know about any + # new event (backfilled or not) because TODO self._notifier.notify_replication() if self._ephemeral_messages_enabled: diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py deleted file mode 100644
index cb31d65aa9..0000000000
--- a/synapse/handlers/identity.py
+++ /dev/null
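
Everything below removes the identity-server client wholesale, matching the deactivation hunks earlier that stopped unbinding threepids. One behaviour worth noting from the deleted `try_unbind_threepid`: success was the conjunction over all identity servers known to hold the binding, accumulated with `&=`. In isolation, with the per-server call abstracted as a placeholder:

```python
from typing import Awaitable, Callable, List

async def try_unbind_everywhere(
    unbind: Callable[[str], Awaitable[bool]],  # placeholder per-server call
    id_servers: List[str],
) -> bool:
    if not id_servers:
        return False  # nowhere known to unbind from
    changed = True
    for id_server in id_servers:
        # One server without unbind support makes the overall result False.
        changed &= await unbind(id_server)
    return changed
```
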
@@ -1,811 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2017 Vector Creations Ltd -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -"""Utilities for interacting with Identity Servers""" -import logging -import urllib.parse -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple - -import attr - -from synapse.api.errors import ( - CodeMessageException, - Codes, - HttpResponseException, - SynapseError, -) -from synapse.api.ratelimiting import Ratelimiter -from synapse.http import RequestTimedOutError -from synapse.http.client import SimpleHttpClient -from synapse.http.site import SynapseRequest -from synapse.types import JsonDict, Requester -from synapse.util import json_decoder -from synapse.util.hash import sha256_and_url_safe_base64 -from synapse.util.stringutils import ( - assert_valid_client_secret, - random_string, - valid_id_server_location, -) - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - -id_server_scheme = "https://" - - -class IdentityHandler: - def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main - # An HTTP client for contacting trusted URLs. - self.http_client = SimpleHttpClient(hs) - # An HTTP client for contacting identity servers specified by clients. - self._http_client = SimpleHttpClient( - hs, - ip_blocklist=hs.config.server.federation_ip_range_blocklist, - ip_allowlist=hs.config.server.federation_ip_range_allowlist, - ) - self.federation_http_client = hs.get_federation_http_client() - self.hs = hs - - self._web_client_location = hs.config.email.invite_client_location - - # Ratelimiters for `/requestToken` endpoints. - self._3pid_validation_ratelimiter_ip = Ratelimiter( - store=self.store, - clock=hs.get_clock(), - cfg=hs.config.ratelimiting.rc_3pid_validation, - ) - self._3pid_validation_ratelimiter_address = Ratelimiter( - store=self.store, - clock=hs.get_clock(), - cfg=hs.config.ratelimiting.rc_3pid_validation, - ) - - async def ratelimit_request_token_requests( - self, - request: SynapseRequest, - medium: str, - address: str, - ) -> None: - """Used to ratelimit requests to `/requestToken` by IP and address. - - Args: - request: The associated request - medium: The type of threepid, e.g. "msisdn" or "email" - address: The actual threepid ID, e.g. the phone number or email address - """ - - await self._3pid_validation_ratelimiter_ip.ratelimit( - None, (medium, request.getClientAddress().host) - ) - await self._3pid_validation_ratelimiter_address.ratelimit( - None, (medium, address) - ) - - async def threepid_from_creds( - self, id_server: str, creds: Dict[str, str] - ) -> Optional[JsonDict]: - """ - Retrieve and validate a threepid identifier from a "credentials" dictionary against a - given identity server - - Args: - id_server: The identity server to validate 3PIDs against. 
Must be a - complete URL including the protocol (http(s)://) - creds: Dictionary containing the following keys: - * client_secret|clientSecret: A unique secret str provided by the client - * sid: The ID of the validation session - - Returns: - A dictionary consisting of response params to the /getValidated3pid - endpoint of the Identity Service API, or None if the threepid was not found - """ - client_secret = creds.get("client_secret") or creds.get("clientSecret") - if not client_secret: - raise SynapseError( - 400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM - ) - assert_valid_client_secret(client_secret) - - session_id = creds.get("sid") - if not session_id: - raise SynapseError( - 400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM - ) - - query_params = {"sid": session_id, "client_secret": client_secret} - - url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid" - - try: - data = await self.http_client.get_json(url, query_params) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except HttpResponseException as e: - logger.info( - "%s returned %i for threepid validation for: %s", - id_server, - e.code, - creds, - ) - return None - - # Old versions of Sydent return a 200 http code even on a failed validation - # check. Thus, in addition to the HttpResponseException check above (which - # checks for non-200 errors), we need to make sure validation_session isn't - # actually an error, identified by the absence of a "medium" key - # See https://github.com/matrix-org/sydent/issues/215 for details - if "medium" in data: - return data - - logger.info("%s reported non-validated threepid: %s", id_server, creds) - return None - - async def bind_threepid( - self, - client_secret: str, - sid: str, - mxid: str, - id_server: str, - id_access_token: str, - ) -> JsonDict: - """Bind a 3PID to an identity server - - Args: - client_secret: A unique secret provided by the client - sid: The ID of the validation session - mxid: The MXID to bind the 3PID to - id_server: The domain of the identity server to query - id_access_token: The access token to authenticate to the identity - server with - - Raises: - SynapseError: On any of the following conditions - - the supplied id_server is not a valid identity server name - - we failed to contact the supplied identity server - - Returns: - The response from the identity server - """ - logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server) - - if not valid_id_server_location(id_server): - raise SynapseError( - 400, - "id_server must be a valid hostname with optional port and path components", - ) - - bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} - bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) - headers = {"Authorization": create_id_access_token_header(id_access_token)} - - try: - # Use the blacklisting http client as this call is only to identity servers - # provided by a client - data = await self._http_client.post_json_get_json( - bind_url, bind_data, headers=headers - ) - - # Remember where we bound the threepid - await self.store.add_user_bound_threepid( - user_id=mxid, - medium=data["medium"], - address=data["address"], - id_server=id_server, - ) - - return data - except HttpResponseException as e: - logger.error("3PID bind failed with Matrix error: %r", e) - raise e.to_synapse_error() - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except 
CodeMessageException as e: - data = json_decoder.decode(e.msg) # XXX WAT? - return data - - async def try_unbind_threepid( - self, mxid: str, medium: str, address: str, id_server: Optional[str] - ) -> bool: - """Attempt to remove a 3PID from one or more identity servers. - - Args: - mxid: Matrix user ID of binding to be removed - medium: The medium of the third-party ID. - address: The address of the third-party ID. - id_server: An identity server to attempt to unbind from. If None, - attempt to remove the association from all identity servers - known to potentially have it. - - Raises: - SynapseError: If we failed to contact one or more identity servers. - - Returns: - True on success, otherwise False if the identity server doesn't - support unbinding (or no identity server to contact was found). - """ - if id_server: - id_servers = [id_server] - else: - id_servers = await self.store.get_id_servers_user_bound( - mxid, medium, address - ) - - # We don't know where to unbind, so we don't have a choice but to return - if not id_servers: - return False - - changed = True - for id_server in id_servers: - changed &= await self._try_unbind_threepid_with_id_server( - mxid, medium, address, id_server - ) - - return changed - - async def _try_unbind_threepid_with_id_server( - self, mxid: str, medium: str, address: str, id_server: str - ) -> bool: - """Removes a binding from an identity server - - Args: - mxid: Matrix user ID of binding to be removed - medium: The medium of the third-party ID - address: The address of the third-party ID - id_server: Identity server to unbind from - - Raises: - SynapseError: On any of the following conditions - - the supplied id_server is not a valid identity server name - - we failed to contact the supplied identity server - - Returns: - True on success, otherwise False if the identity - server doesn't support unbinding - """ - - if not valid_id_server_location(id_server): - raise SynapseError( - 400, - "id_server must be a valid hostname with optional port and path components", - ) - - url = "https://%s/_matrix/identity/v2/3pid/unbind" % (id_server,) - url_bytes = b"/_matrix/identity/v2/3pid/unbind" - - content = { - "mxid": mxid, - "threepid": {"medium": medium, "address": address}, - } - - # we abuse the federation http client to sign the request, but we have to send it - # using the normal http client since we don't want the SRV lookup and want normal - # 'browser-like' HTTPS. 
- auth_headers = self.federation_http_client.build_auth_headers( - destination=None, - method=b"POST", - url_bytes=url_bytes, - content=content, - destination_is=id_server.encode("ascii"), - ) - headers = {b"Authorization": auth_headers} - - try: - # Use the blacklisting http client as this call is only to identity servers - # provided by a client - await self._http_client.post_json_get_json(url, content, headers) - changed = True - except HttpResponseException as e: - changed = False - if e.code in (400, 404, 501): - # The remote server probably doesn't support unbinding (yet) - logger.warning("Received %d response while unbinding threepid", e.code) - else: - logger.error("Failed to unbind threepid on identity server: %s", e) - raise SynapseError(500, "Failed to contact identity server") - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - - await self.store.remove_user_bound_threepid(mxid, medium, address, id_server) - - return changed - - async def send_threepid_validation( - self, - email_address: str, - client_secret: str, - send_attempt: int, - send_email_func: Callable[[str, str, str, str], Awaitable], - next_link: Optional[str] = None, - ) -> str: - """Send a threepid validation email for password reset or - registration purposes - - Args: - email_address: The user's email address - client_secret: The provided client secret - send_attempt: Which send attempt this is - send_email_func: A function that takes an email address, token, - client_secret and session_id, sends an email - and returns an Awaitable. - next_link: The URL to redirect the user to after validation - - Returns: - The new session_id upon success - - Raises: - SynapseError if an error occurred when sending the email - """ - # Check that this email/client_secret/send_attempt combo is new or - # greater than what we've seen previously - session = await self.store.get_threepid_validation_session( - "email", client_secret, address=email_address, validated=False - ) - - # Check to see if a session already exists and that it is not yet - # marked as validated - if session and session.validated_at is None: - session_id = session.session_id - last_send_attempt = session.last_send_attempt - - # Check that the send_attempt is higher than previous attempts - if send_attempt <= last_send_attempt: - # If not, just return a success without sending an email - return session_id - else: - # A non-validated session does not exist yet. - # Generate a session id - session_id = random_string(16) - - if next_link: - # Manipulate the next_link to add the sid, because the caller won't get - # it until we send a response, by which time we've sent the mail. - if "?" in next_link: - next_link += "&" - else: - next_link += "?" 
- next_link += "sid=" + urllib.parse.quote(session_id) - - # Generate a new validation token - token = random_string(32) - - # Send the mail with the link containing the token, client_secret - # and session_id - try: - await send_email_func(email_address, token, client_secret, session_id) - except Exception: - logger.exception( - "Error sending threepid validation email to %s", email_address - ) - raise SynapseError(500, "An error was encountered when sending the email") - - token_expires = ( - self.hs.get_clock().time_msec() - + self.hs.config.email.email_validation_token_lifetime - ) - - await self.store.start_or_continue_validation_session( - "email", - email_address, - session_id, - client_secret, - send_attempt, - next_link, - token, - token_expires, - ) - - return session_id - - async def requestMsisdnToken( - self, - id_server: str, - country: str, - phone_number: str, - client_secret: str, - send_attempt: int, - next_link: Optional[str] = None, - ) -> JsonDict: - """ - Request an external server send an SMS message on our behalf for the purposes of - threepid validation. - Args: - id_server: The identity server to proxy to - country: The country code of the phone number - phone_number: The number to send the message to - client_secret: The unique client_secret sends by the user - send_attempt: Which attempt this is - next_link: A link to redirect the user to once they submit the token - - Returns: - The json response body from the server - """ - params = { - "country": country, - "phone_number": phone_number, - "client_secret": client_secret, - "send_attempt": send_attempt, - } - if next_link: - params["next_link"] = next_link - - try: - data = await self.http_client.post_json_get_json( - id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken", - params, - ) - except HttpResponseException as e: - logger.info("Proxied requestToken failed: %r", e) - raise e.to_synapse_error() - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - - # we need to tell the client to send the token back to us, since it doesn't - # otherwise know where to send it, so add submit_url response parameter - # (see also MSC2078) - data["submit_url"] = ( - self.hs.config.server.public_baseurl - + "_matrix/client/unstable/add_threepid/msisdn/submit_token" - ) - return data - - async def validate_threepid_session( - self, client_secret: str, sid: str - ) -> Optional[JsonDict]: - """Validates a threepid session with only the client secret and session ID - Tries validating against any configured account_threepid_delegates as well as locally. - - Args: - client_secret: A secret provided by the client - sid: The ID of the session - - Returns: - The json response if validation was successful, otherwise None - """ - # XXX: We shouldn't need to keep wrapping and unwrapping this value - threepid_creds = {"client_secret": client_secret, "sid": sid} - - # We don't actually know which medium this 3PID is. 
Thus we first assume it's email, - # and if validation fails we try msisdn - - # Try to validate as email - if self.hs.config.email.can_verify_email: - # Get a validated session matching these details - validation_session = await self.store.get_threepid_validation_session( - "email", client_secret, sid=sid, validated=True - ) - if validation_session: - return attr.asdict(validation_session) - - # Try to validate as msisdn - if self.hs.config.registration.account_threepid_delegate_msisdn: - # Ask our delegated msisdn identity server - return await self.threepid_from_creds( - self.hs.config.registration.account_threepid_delegate_msisdn, - threepid_creds, - ) - - return None - - async def proxy_msisdn_submit_token( - self, id_server: str, client_secret: str, sid: str, token: str - ) -> JsonDict: - """Proxy a POST submitToken request to an identity server for verification purposes - - Args: - id_server: The identity server URL to contact - client_secret: Secret provided by the client - sid: The ID of the session - token: The verification token - - Raises: - SynapseError: If we failed to contact the identity server - - Returns: - The response dict from the identity server - """ - body = {"client_secret": client_secret, "sid": sid, "token": token} - - try: - return await self.http_client.post_json_get_json( - id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken", - body, - ) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except HttpResponseException as e: - logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) - raise SynapseError(400, "Error contacting the identity server") - - async def lookup_3pid( - self, id_server: str, medium: str, address: str, id_access_token: str - ) -> Optional[str]: - """Looks up a 3pid in the passed identity server. - - Args: - id_server: The server name (including port, if required) - of the identity server to use. - medium: The type of the third party identifier (e.g. "email"). - address: The third party identifier (e.g. "foo@example.com"). - id_access_token: The access token to authenticate to the identity - server with - - Returns: - the matrix ID of the 3pid, or None if it is not recognized. - """ - - try: - results = await self._lookup_3pid_v2( - id_server, id_access_token, medium, address - ) - return results - except Exception as e: - logger.warning("Error when looking up hashing details: %s", e) - return None - - async def _lookup_3pid_v2( - self, id_server: str, id_access_token: str, medium: str, address: str - ) -> Optional[str]: - """Looks up a 3pid in the passed identity server using v2 lookup. - - Args: - id_server: The server name (including port, if required) - of the identity server to use. - id_access_token: The access token to authenticate to the identity server with - medium: The type of the third party identifier (e.g. "email"). - address: The third party identifier (e.g. "foo@example.com"). - - Returns: - the matrix ID of the 3pid, or None if it is not recognised. 
- """ - # Check what hashing details are supported by this identity server - try: - hash_details = await self._http_client.get_json( - "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), - {"access_token": id_access_token}, - ) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - - if not isinstance(hash_details, dict): - logger.warning( - "Got non-dict object when checking hash details of %s%s: %s", - id_server_scheme, - id_server, - hash_details, - ) - raise SynapseError( - 400, - "Non-dict object from %s%s during v2 hash_details request: %s" - % (id_server_scheme, id_server, hash_details), - ) - - # Extract information from hash_details - supported_lookup_algorithms = hash_details.get("algorithms") - lookup_pepper = hash_details.get("lookup_pepper") - if ( - not supported_lookup_algorithms - or not isinstance(supported_lookup_algorithms, list) - or not lookup_pepper - or not isinstance(lookup_pepper, str) - ): - raise SynapseError( - 400, - "Invalid hash details received from identity server %s%s: %s" - % (id_server_scheme, id_server, hash_details), - ) - - # Check if any of the supported lookup algorithms are present - if LookupAlgorithm.SHA256 in supported_lookup_algorithms: - # Perform a hashed lookup - lookup_algorithm = LookupAlgorithm.SHA256 - - # Hash address, medium and the pepper with sha256 - to_hash = "%s %s %s" % (address, medium, lookup_pepper) - lookup_value = sha256_and_url_safe_base64(to_hash) - - elif LookupAlgorithm.NONE in supported_lookup_algorithms: - # Perform a non-hashed lookup - lookup_algorithm = LookupAlgorithm.NONE - - # Combine together plaintext address and medium - lookup_value = "%s %s" % (address, medium) - - else: - logger.warning( - "None of the provided lookup algorithms of %s are supported: %s", - id_server, - supported_lookup_algorithms, - ) - raise SynapseError( - 400, - "Provided identity server does not support any v2 lookup " - "algorithms that this homeserver supports.", - ) - - # Authenticate with identity server given the access token from the client - headers = {"Authorization": create_id_access_token_header(id_access_token)} - - try: - lookup_results = await self._http_client.post_json_get_json( - "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), - { - "addresses": [lookup_value], - "algorithm": lookup_algorithm, - "pepper": lookup_pepper, - }, - headers=headers, - ) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - except Exception as e: - logger.warning("Error when performing a v2 3pid lookup: %s", e) - raise SynapseError( - 500, "Unknown error occurred during identity server lookup" - ) - - # Check for a mapping from what we looked up to an MXID - if "mappings" not in lookup_results or not isinstance( - lookup_results["mappings"], dict - ): - logger.warning("No results from 3pid lookup") - return None - - # Return the MXID if it's available, or None otherwise - mxid = lookup_results["mappings"].get(lookup_value) - return mxid - - async def ask_id_server_for_third_party_invite( - self, - requester: Requester, - id_server: str, - medium: str, - address: str, - room_id: str, - inviter_user_id: str, - room_alias: str, - room_avatar_url: str, - room_join_rules: str, - room_name: str, - room_type: Optional[str], - inviter_display_name: str, - inviter_avatar_url: str, - id_access_token: str, - ) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]: - """ - Asks an identity server for a third party invite. 
- - Args: - requester - id_server: hostname + optional port for the identity server. - medium: The literal string "email". - address: The third party address being invited. - room_id: The ID of the room to which the user is invited. - inviter_user_id: The user ID of the inviter. - room_alias: An alias for the room, for cosmetic notifications. - room_avatar_url: The URL of the room's avatar, for cosmetic - notifications. - room_join_rules: The join rules of the room (e.g. "public"). - room_name: The m.room.name of the room. - room_type: The type of the room from its m.room.create event (e.g. "m.space"). - inviter_display_name: The current display name of the - inviter. - inviter_avatar_url: The URL of the inviter's avatar. - id_access_token: The access token to authenticate to the identity - server with - - Returns: - A tuple containing: - token: The token which must be signed to prove authenticity. - public_keys ([{"public_key": str, "key_validity_url": str}]): - public_key is a base64-encoded ed25519 public key. - fallback_public_key: One element from public_keys. - display_name: A user-friendly name to represent the invited user. - """ - invite_config = { - "medium": medium, - "address": address, - "room_id": room_id, - "room_alias": room_alias, - "room_avatar_url": room_avatar_url, - "room_join_rules": room_join_rules, - "room_name": room_name, - "sender": inviter_user_id, - "sender_display_name": inviter_display_name, - "sender_avatar_url": inviter_avatar_url, - } - - if room_type is not None: - invite_config["room_type"] = room_type - - # If a custom web client location is available, include it in the request. - if self._web_client_location: - invite_config["org.matrix.web_client_location"] = self._web_client_location - - # Add the identity service access token to the JSON body and use the v2 - # Identity Service endpoints - data = None - - key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( - id_server_scheme, - id_server, - ) - - url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server) - try: - data = await self._http_client.post_json_get_json( - url, - invite_config, - {"Authorization": create_id_access_token_header(id_access_token)}, - ) - except RequestTimedOutError: - raise SynapseError(500, "Timed out contacting identity server") - - token = data["token"] - public_keys = data.get("public_keys", []) - if "public_key" in data: - fallback_public_key = { - "public_key": data["public_key"], - "key_validity_url": key_validity_url, - } - else: - fallback_public_key = public_keys[0] - - if not public_keys: - public_keys.append(fallback_public_key) - display_name = data["display_name"] - return token, public_keys, fallback_public_key, display_name - - -def create_id_access_token_header(id_access_token: str) -> List[str]: - """Create an Authorization header for passing to SimpleHttpClient as the header value - of an HTTP request. - - Args: - id_access_token: An identity server access token. - - Returns: - The ascii-encoded bearer token encased in a list. - """ - # Prefix with Bearer - bearer_token = "Bearer %s" % id_access_token - - # Encode headers to standard ascii - bearer_token.encode("ascii") - - # Return as a list as that's how SimpleHttpClient takes header values - return [bearer_token] - - -class LookupAlgorithm: - """ - Supported hashing algorithms when performing a 3PID lookup. - - SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64 - encoding - NONE - Not performing any hashing. 
Simply sending an (address, medium) combo in plaintext - """ - - SHA256 = "sha256" - NONE = "none" diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py
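For reference, the SHA256 lookup path removed above joins address, medium, and pepper with single spaces before hashing. A minimal self-contained sketch of that transformation, assuming only hashlib and base64 from the standard library (the real helper lives in synapse.util.hash; the address and pepper here are illustrative):

    # Sketch of the v2 hashed-lookup value computation described above.
    import hashlib
    from base64 import urlsafe_b64encode

    def sha256_and_url_safe_base64(value: str) -> str:
        # SHA256-hash the string, then url-safe base64 it without padding.
        digest = hashlib.sha256(value.encode("utf-8")).digest()
        return urlsafe_b64encode(digest).decode("ascii").rstrip("=")

    # Hash "address medium pepper", the combination the v2 lookup expects.
    lookup_value = sha256_and_url_safe_base64(
        "%s %s %s" % ("alice@example.com", "email", "matrixrocks")
    )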
index 5fa7a305ad..400f3a59aa 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py
@@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Tuple from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError @@ -36,11 +36,12 @@ class JwtHandler: self.jwt_secret = hs.config.jwt.jwt_secret self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim + self.jwt_display_name_claim = hs.config.jwt.jwt_display_name_claim self.jwt_algorithm = hs.config.jwt.jwt_algorithm self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - def validate_login(self, login_submission: JsonDict) -> str: + def validate_login(self, login_submission: JsonDict) -> Tuple[str, Optional[str]]: """ Authenticates the user for the /login API @@ -49,7 +50,8 @@ class JwtHandler: (including 'type' and other relevant fields) Returns: - The user ID that is logging in. + A tuple of (user_id, display_name) of the user that is logging in. + If the JWT does not contain a display name, the second element of the tuple will be None. Raises: LoginError if there was an authentication problem. @@ -109,4 +111,10 @@ class JwtHandler: if user is None: raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) - return UserID(user, self.hs.hostname).to_string() + default_display_name = None + if self.jwt_display_name_claim: + display_name_claim = claims.get(self.jwt_display_name_claim) + if display_name_claim is not None: + default_display_name = display_name_claim + + return UserID(user, self.hs.hostname).to_string(), default_display_name diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
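The new display-name claim handling can be exercised with authlib directly. A hedged sketch, where the HS256 shared secret and the claim name "name" are illustrative stand-ins for the jwt_secret and jwt_display_name_claim settings:

    # Mint and validate a token carrying an optional display-name claim.
    from authlib.jose import JsonWebToken

    jwt = JsonWebToken(["HS256"])
    secret = "shared-jwt-secret"  # illustrative; configured as jwt_secret

    # An identity provider would mint a token along these lines.
    token = jwt.encode({"alg": "HS256"}, {"sub": "alice", "name": "Alice"}, secret)

    claims = jwt.decode(token, secret)
    claims.validate()

    user = claims.get("sub")           # -> "alice"
    display_name = claims.get("name")  # -> "Alice", or None if the claim is absent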
index 5aa48230ec..cb6de02309 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py
@@ -143,9 +143,9 @@ class MessageHandler: elif membership == Membership.LEAVE: key = (event_type, state_key) # If the membership is not JOIN, then the event ID should exist. - assert ( - membership_event_id is not None - ), "check_user_in_room_or_world_readable returned invalid data" + assert membership_event_id is not None, ( + "check_user_in_room_or_world_readable returned invalid data" + ) room_state = await self._state_storage_controller.get_state_for_events( [membership_event_id], StateFilter.from_types([key]) ) @@ -196,7 +196,9 @@ class MessageHandler: AuthError (403) if the user doesn't have permission to view members of this room. """ - state_filter = state_filter or StateFilter.all() + if state_filter is None: + state_filter = StateFilter.all() + user_id = requester.user.to_string() if at_token: @@ -240,9 +242,9 @@ class MessageHandler: room_state = await self.store.get_events(state_ids.values()) elif membership == Membership.LEAVE: # If the membership is not JOIN, then the event ID should exist. - assert ( - membership_event_id is not None - ), "check_user_in_room_or_world_readable returned invalid data" + assert membership_event_id is not None, ( + "check_user_in_room_or_world_readable returned invalid data" + ) room_state_events = ( await self._state_storage_controller.get_state_for_events( [membership_event_id], state_filter=state_filter @@ -493,6 +495,7 @@ class EventCreationHandler: self._instance_name = hs.get_instance_name() self._notifier = hs.get_notifier() self._worker_lock_handler = hs.get_worker_locks_handler() + self._policy_handler = hs.get_room_policy_handler() self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state @@ -642,11 +645,33 @@ class EventCreationHandler: """ await self.auth_blocking.check_auth_blocking(requester=requester) - if event_dict["type"] == EventTypes.Message: - requester_suspended = await self.store.get_user_suspended_status( - requester.user.to_string() - ) - if requester_suspended: + requester_suspended = await self.store.get_user_suspended_status( + requester.user.to_string() + ) + if requester_suspended: + # We want to allow suspended users to perform "corrective" actions + # asked of them by server admins, such as redact their messages and + # leave rooms. 
+ if event_dict["type"] in ["m.room.redaction", "m.room.member"]: + if event_dict["type"] == "m.room.redaction": + event = await self.store.get_event( + event_dict["content"]["redacts"], allow_none=True + ) + if event: + if event.sender != requester.user.to_string(): + raise SynapseError( + 403, + "You can only redact your own events while account is suspended.", + Codes.USER_ACCOUNT_SUSPENDED, + ) + if event_dict["type"] == "m.room.member": + if event_dict["content"]["membership"] != "leave": + raise SynapseError( + 403, + "Changing membership while account is suspended is not allowed.", + Codes.USER_ACCOUNT_SUSPENDED, + ) + else: raise SynapseError( 403, "Sending messages while account is suspended is not allowed.", @@ -1084,6 +1109,18 @@ class EventCreationHandler: event.sender, ) + policy_allowed = await self._policy_handler.is_event_allowed(event) + if not policy_allowed: + logger.warning( + "Event not allowed by policy server, rejecting %s", + event.event_id, + ) + raise SynapseError( + 403, + "This message has been rejected as probable spam", + Codes.FORBIDDEN, + ) + spam_check_result = ( await self._spam_checker_module_callbacks.check_event_for_spam( event @@ -1095,7 +1132,7 @@ class EventCreationHandler: [code, dict] = spam_check_result raise SynapseError( 403, - "This message had been rejected as probable spam", + "This message has been rejected as probable spam", code, dict, ) @@ -1225,10 +1262,9 @@ class EventCreationHandler: ) if prev_event_ids is not None: - assert ( - len(prev_event_ids) <= 10 - ), "Attempting to create an event with %i prev_events" % ( - len(prev_event_ids), + assert len(prev_event_ids) <= 10, ( + "Attempting to create an event with %i prev_events" + % (len(prev_event_ids),) ) else: prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id) @@ -1243,12 +1279,14 @@ class EventCreationHandler: # Allow an event to have empty list of prev_event_ids # only if it has auth_event_ids. or auth_event_ids - ), "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids" + ), ( + "Attempting to create a non-m.room.create event with no prev_events or auth_event_ids" + ) else: # we now ought to have some prev_events (unless it's a create event). - assert ( - builder.type == EventTypes.Create or prev_event_ids - ), "Attempting to create a non-m.room.create event with no prev_events" + assert builder.type == EventTypes.Create or prev_event_ids, ( + "Attempting to create a non-m.room.create event with no prev_events" + ) if for_batch: assert prev_event_ids is not None @@ -1439,6 +1477,12 @@ class EventCreationHandler: ) return prev_event + if not event.is_state() and event.type in [ + EventTypes.Message, + EventTypes.Encrypted, + ]: + await self.store.set_room_participation(event.user_id, event.room_id) + if event.internal_metadata.is_out_of_band_membership(): # the only sort of out-of-band-membership events we expect to see here are # invite rejections and rescinded knocks that we have generated ourselves. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py
index 22b59829fa..4b85282c1e 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py
@@ -31,6 +31,7 @@ from typing import ( List, Optional, Type, + TypedDict, TypeVar, Union, ) @@ -52,7 +53,6 @@ from pymacaroons.exceptions import ( MacaroonInitException, MacaroonInvalidSignatureException, ) -from typing_extensions import TypedDict from twisted.web.client import readBody from twisted.web.http_headers import Headers @@ -382,7 +382,12 @@ class OidcProvider: self._macaroon_generaton = macaroon_generator self._config = provider - self._callback_url: str = hs.config.oidc.oidc_callback_url + + self._callback_url: str + if provider.redirect_uri is not None: + self._callback_url = provider.redirect_uri + else: + self._callback_url = hs.config.oidc.oidc_callback_url # Calculate the prefix for OIDC callback paths based on the public_baseurl. # We'll insert this into the Path= parameter of any session cookies we set. @@ -462,6 +467,10 @@ class OidcProvider: self._sso_handler.register_identity_provider(self) + self.passthrough_authorization_parameters = ( + provider.passthrough_authorization_parameters + ) + def _validate_metadata(self, m: OpenIDProviderMetadata) -> None: """Verifies the provider metadata. @@ -578,6 +587,24 @@ class OidcProvider: ) @property + def _uses_access_token(self) -> bool: + """Return True if the `access_token` will be used during the login process. + + This is useful to determine whether the access token + returned by the identity provider, and + any related metadata (such as the `at_hash` field in + the ID token), should be validated. + """ + # Currently, Synapse only uses the access_token to fetch user metadata + # from the userinfo endpoint. Therefore we only have a single criterion + # to check right now but this may change in the future and this function + # should be updated if more usages are introduced. + # + # For example, if we start to use the access_token given to us by the + # IdP for more things, such as accessing Resource Server APIs. + return self._uses_userinfo + + @property def issuer(self) -> str: """The issuer identifying this provider.""" return self._config.issuer @@ -640,6 +667,11 @@ class OidcProvider: elif self._config.pkce_method == "never": metadata.pop("code_challenge_methods_supported", None) + if self._config.id_token_signing_alg_values_supported: + metadata["id_token_signing_alg_values_supported"] = ( + self._config.id_token_signing_alg_values_supported + ) + self._validate_metadata(metadata) return metadata @@ -943,9 +975,16 @@ class OidcProvider: "nonce": nonce, "client_id": self._client_auth.client_id, } - if "access_token" in token: + if self._uses_access_token and "access_token" in token: # If we got an `access_token`, there should be an `at_hash` claim - # in the `id_token` that we can check against. + # in the `id_token` that we can check against. Setting this + # instructs authlib to check the value of `at_hash` in the + # ID token. + # + # We only need to verify the access token if we actually make + # use of it. Which currently only happens when we need to fetch + # the user's information from the userinfo_endpoint. Thus, this + # check is also gated on self._uses_userinfo. claims_params["access_token"] = token["access_token"] claims_options = {"iss": {"values": [metadata["issuer"]]}} @@ -995,14 +1034,27 @@ class OidcProvider: when everything is done (or None for UI Auth) ui_auth_session_id: The session ID of the ongoing UI Auth (or None if this is a login). - Returns: The redirect URL to the authorization endpoint. """ state = generate_token() - nonce = generate_token() + + # Generate a nonce 32 characters long. 
When encoded with base64url later on, + # the nonce will be 43 characters when sent to the identity provider. + # + # While RFC7636 does not specify a minimum length for the `nonce` + # parameter, the TI-Messenger IDP_FD spec v1.7.3 does require it to be + # between 43 and 128 characters. This spec concerns using Matrix for + # communication in German healthcare. + # + # As increasing the length only strengthens security, we use this length + # to allow TI-Messenger deployments using Synapse to satisfy this + # external spec. + # + # See https://github.com/element-hq/synapse/pull/18109 for more context. + nonce = generate_token(length=32) code_verifier = "" if not client_redirect_url: @@ -1054,6 +1106,13 @@ class OidcProvider: ) ) + # add passthrough additional authorization parameters + passthrough_authorization_parameters = self.passthrough_authorization_parameters + for parameter in passthrough_authorization_parameters: + parameter_value = parse_string(request, parameter) + if parameter_value: + additional_authorization_parameters.update({parameter: parameter_value}) + authorization_endpoint = metadata.get("authorization_endpoint") return prepare_grant_uri( authorization_endpoint, @@ -1716,17 +1775,12 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): if display_name == "": display_name = None - emails: List[str] = [] - email = render_template_field(self._config.email_template) - if email: - emails.append(email) - picture = self._config.picture_template.render(user=userinfo).strip() return UserAttributeDict( localpart=localpart, display_name=display_name, - emails=emails, + emails=[], # 3PIDs are not supported picture=picture, confirm_localpart=self._config.confirm_localpart, ) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
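For reference, the 43-character figure in the nonce comment above is plain base64url arithmetic: 32 single-byte characters contribute 256 bits, and base64 emits one character per 6 bits, so ceil(256 / 6) = 43 characters once padding is stripped. A quick check, assuming a 32-byte input:

    # Demonstrate that 32 bytes base64url-encode to 43 unpadded characters.
    import base64
    import secrets

    nonce_bytes = secrets.token_bytes(32)
    encoded = base64.urlsafe_b64encode(nonce_bytes).rstrip(b"=")
    assert len(encoded) == 43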
index 6fd7afa280..365c9cabcb 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py
@@ -507,15 +507,17 @@ class PaginationHandler: # Initially fetch the events from the database. With any luck, we can return # these without blocking on backfill (handled below). - events, next_key = ( - await self.store.paginate_room_events_by_topological_ordering( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, - ) + ( + events, + next_key, + limited, + ) = await self.store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, ) if pagin_config.direction == Direction.BACKWARDS: @@ -575,25 +577,31 @@ class PaginationHandler: or missing_too_many_events or not_enough_events_to_fill_response ): - did_backfill = await self.hs.get_federation_handler().maybe_backfill( + # Historical Note: There used to be a check here for whether backfill was + # successful or not + await self.hs.get_federation_handler().maybe_backfill( room_id, curr_topo, limit=pagin_config.limit, ) - # If we did backfill something, refetch the events from the database to - # catch anything new that might have been added since we last fetched. - if did_backfill: - events, next_key = ( - await self.store.paginate_room_events_by_topological_ordering( - room_id=room_id, - from_key=from_token.room_key, - to_key=to_room_key, - direction=pagin_config.direction, - limit=pagin_config.limit, - event_filter=event_filter, - ) - ) + # Regardless of whether we backfilled or not, another worker or even a + # simultaneous request may have backfilled for us while we were held + # behind the linearizer. This should not have too much additional + # database load as it will only be triggered if a backfill *might* have + # been needed. + ( + events, + next_key, + limited, + ) = await self.store.paginate_room_events_by_topological_ordering( + room_id=room_id, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, + event_filter=event_filter, ) else: # Otherwise, we can backfill in the background for eventual # consistency's sake but we don't need to block the client waiting @@ -608,6 +616,15 @@ class PaginationHandler: next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key) + # We might have hit some internal filtering first, for example rejected + # events. Ensure we return a pagination token then. + if not events and limited: + return { + "chunk": [], + "start": await from_token.to_string(self.store), + "end": await next_token.to_string(self.store), + } + # if no events are returned from pagination, that implies # we have reached the end of the available events. # In that case we do not return end, to tell the client diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
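The empty-chunk-with-tokens response introduced above matters for client pagination loops: an empty chunk no longer implies the end of the timeline, only a missing "end" token does. A hedged sketch of a client-side consumer, where fetch_messages is a hypothetical stand-in for GET /_matrix/client/v3/rooms/{roomId}/messages:

    from typing import Callable, Iterator, Optional

    def paginate_backwards(
        fetch_messages: Callable[..., dict], room_id: str, start: str
    ) -> Iterator[dict]:
        token: Optional[str] = start
        while token is not None:
            page = fetch_messages(room_id, from_token=token, dir="b")
            yield from page["chunk"]  # may be empty if events were filtered out
            token = page.get("end")   # a missing "end" means no more events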
index 37ee625f71..390cafa8f6 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py
@@ -71,6 +71,7 @@ user state; this device follows the normal timeout logic (see above) and will automatically be replaced with any information from currently available devices. """ + import abc import contextlib import itertools @@ -493,9 +494,9 @@ class WorkerPresenceHandler(BasePresenceHandler): # The number of ongoing syncs on this process, by (user ID, device ID). # Empty if _presence_enabled is false. - self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = ( - {} - ) + self._user_device_to_num_current_syncs: Dict[ + Tuple[str, Optional[str]], int + ] = {} self.notifier = hs.get_notifier() self.instance_id = hs.get_instance_id() @@ -818,9 +819,9 @@ class PresenceHandler(BasePresenceHandler): # Keeps track of the number of *ongoing* syncs on this process. While # this is non zero a user will never go offline. - self._user_device_to_num_current_syncs: Dict[Tuple[str, Optional[str]], int] = ( - {} - ) + self._user_device_to_num_current_syncs: Dict[ + Tuple[str, Optional[str]], int + ] = {} # Keeps track of the number of *ongoing* syncs on other processes. # diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 6663d4b271..cdc388b4ab 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py
@@ -22,6 +22,7 @@ import logging import random from typing import TYPE_CHECKING, List, Optional, Union +from synapse.api.constants import ProfileFields from synapse.api.errors import ( AuthError, Codes, @@ -31,7 +32,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia -from synapse.types import JsonDict, Requester, UserID, create_requester +from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester from synapse.util.caches.descriptors import cached from synapse.util.stringutils import parse_and_validate_mxc_uri @@ -42,6 +43,8 @@ logger = logging.getLogger(__name__) MAX_DISPLAYNAME_LEN = 256 MAX_AVATAR_URL_LEN = 1000 +# Field name length is specced at 255 bytes. +MAX_CUSTOM_FIELD_LEN = 255 class ProfileHandler: @@ -74,17 +77,42 @@ class ProfileHandler: self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict: + """ + Get a user's profile as a JSON dictionary. + + Args: + user_id: The user to fetch the profile of. + ignore_backoff: True to ignore backoff when fetching over federation. + + Returns: + A JSON dictionary. For local queries this will include the displayname and avatar_url + fields, if set. For remote queries it may contain arbitrary information. + """ target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): profileinfo = await self.store.get_profileinfo(target_user) - if profileinfo.display_name is None and profileinfo.avatar_url is None: + extra_fields = {} + if self.hs.config.experimental.msc4133_enabled: + extra_fields = await self.store.get_profile_fields(target_user) + + if ( + profileinfo.display_name is None + and profileinfo.avatar_url is None + and not extra_fields + ): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) - return { - "displayname": profileinfo.display_name, - "avatar_url": profileinfo.avatar_url, - } + # Do not include display name or avatar if unset. + ret = {} + if profileinfo.display_name is not None: + ret[ProfileFields.DISPLAYNAME] = profileinfo.display_name + if profileinfo.avatar_url is not None: + ret[ProfileFields.AVATAR_URL] = profileinfo.avatar_url + if extra_fields: + ret.update(extra_fields) + + return ret else: try: result = await self.federation.make_query( @@ -107,6 +135,15 @@ class ProfileHandler: raise e.to_synapse_error() async def get_displayname(self, target_user: UserID) -> Optional[str]: + """ + Fetch a user's display name from their profile. + + Args: + target_user: The user to fetch the display name of. + + Returns: + The user's display name or None if unset. + """ if self.hs.is_mine(target_user): try: displayname = await self.store.get_profile_displayname(target_user) @@ -203,6 +240,15 @@ class ProfileHandler: await self._update_join_states(requester, target_user) async def get_avatar_url(self, target_user: UserID) -> Optional[str]: + """ + Fetch a user's avatar URL from their profile. + + Args: + target_user: The user to fetch the avatar URL of. + + Returns: + The user's avatar URL or None if unset. 
+ """ if self.hs.is_mine(target_user): try: avatar_url = await self.store.get_profile_avatar_url(target_user) @@ -322,9 +368,9 @@ class ProfileHandler: server_name = host if self._is_mine_server_name(server_name): - media_info: Optional[Union[LocalMedia, RemoteMedia]] = ( - await self.store.get_local_media(media_id) - ) + media_info: Optional[ + Union[LocalMedia, RemoteMedia] + ] = await self.store.get_local_media(media_id) else: media_info = await self.store.get_cached_remote_media(server_name, media_id) @@ -370,6 +416,110 @@ class ProfileHandler: return True + async def get_profile_field( + self, target_user: UserID, field_name: str + ) -> JsonValue: + """ + Fetch a user's profile from the database for local users and over federation + for remote users. + + Args: + target_user: The user ID to fetch the profile for. + field_name: The field to fetch the profile for. + + Returns: + The value for the profile field or None if the field does not exist. + """ + if self.hs.is_mine(target_user): + try: + field_value = await self.store.get_profile_field( + target_user, field_name + ) + except StoreError as e: + if e.code == 404: + raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) + raise + + return field_value + else: + try: + result = await self.federation.make_query( + destination=target_user.domain, + query_type="profile", + args={"user_id": target_user.to_string(), "field": field_name}, + ignore_backoff=True, + ) + except RequestSendFailed as e: + raise SynapseError(502, "Failed to fetch profile") from e + except HttpResponseException as e: + raise e.to_synapse_error() + + return result.get(field_name) + + async def set_profile_field( + self, + target_user: UserID, + requester: Requester, + field_name: str, + new_value: JsonValue, + by_admin: bool = False, + deactivation: bool = False, + ) -> None: + """Set a new profile field for a user. + + Args: + target_user: the user whose profile is to be changed. + requester: The user attempting to make this change. + field_name: The name of the profile field to update. + new_value: The new field value for this user. + by_admin: Whether this change was made by an administrator. + deactivation: Whether this change was made while deactivating the user. + """ + if not self.hs.is_mine(target_user): + raise SynapseError(400, "User is not hosted on this homeserver") + + if not by_admin and target_user != requester.user: + raise AuthError(403, "Cannot set another user's profile") + + await self.store.set_profile_field(target_user, field_name, new_value) + + # Custom fields do not propagate into the user directory *or* rooms. + profile = await self.store.get_profileinfo(target_user) + await self._third_party_rules.on_profile_update( + target_user.to_string(), profile, by_admin, deactivation + ) + + async def delete_profile_field( + self, + target_user: UserID, + requester: Requester, + field_name: str, + by_admin: bool = False, + deactivation: bool = False, + ) -> None: + """Delete a field from a user's profile. + + Args: + target_user: the user whose profile is to be changed. + requester: The user attempting to make this change. + field_name: The name of the profile field to remove. + by_admin: Whether this change was made by an administrator. + deactivation: Whether this change was made while deactivating the user. 
+ """ + if not self.hs.is_mine(target_user): + raise SynapseError(400, "User is not hosted on this homeserver") + + if not by_admin and target_user != requester.user: + raise AuthError(400, "Cannot set another user's profile") + + await self.store.delete_profile_field(target_user, field_name) + + # Custom fields do not propagate into the user directory *or* rooms. + profile = await self.store.get_profileinfo(target_user) + await self._third_party_rules.on_profile_update( + target_user.to_string(), profile, by_admin, deactivation + ) + async def on_profile_query(self, args: JsonDict) -> JsonDict: """Handles federation profile query requests.""" @@ -386,13 +536,24 @@ class ProfileHandler: just_field = args.get("field", None) - response = {} + response: JsonDict = {} try: - if just_field is None or just_field == "displayname": + if just_field is None or just_field == ProfileFields.DISPLAYNAME: response["displayname"] = await self.store.get_profile_displayname(user) - if just_field is None or just_field == "avatar_url": + if just_field is None or just_field == ProfileFields.AVATAR_URL: response["avatar_url"] = await self.store.get_profile_avatar_url(user) + + if self.hs.config.experimental.msc4133_enabled: + if just_field is None: + response.update(await self.store.get_profile_fields(user)) + elif just_field not in ( + ProfileFields.DISPLAYNAME, + ProfileFields.AVATAR_URL, + ): + response[just_field] = await self.store.get_profile_field( + user, just_field + ) except StoreError as e: if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) @@ -403,6 +564,12 @@ class ProfileHandler: async def _update_join_states( self, requester: Requester, target_user: UserID ) -> None: + """ + Update the membership events of each room the user is joined to with the + new profile information. + + Note that this stomps over any custom display name or avatar URL in member events. + """ if not self.hs.is_mine(target_user): return diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index c200e29569..8dd687c455 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py
@@ -23,10 +23,9 @@ """Contains functions for registering clients.""" import logging -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, TypedDict from prometheus_client import Counter -from typing_extensions import TypedDict from synapse import types from synapse.api.constants import ( @@ -44,7 +43,6 @@ from synapse.api.errors import ( SynapseError, ) from synapse.appservice import ApplicationService -from synapse.config.server import is_threepid_reserved from synapse.handlers.device import DeviceHandler from synapse.http.servlet import assert_params_in_dict from synapse.replication.http.login import RegisterDeviceReplicationServlet @@ -109,13 +107,13 @@ class RegistrationHandler: self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.identity_handler = self.hs.get_identity_handler() self.ratelimiter = hs.get_registration_ratelimiter() self.macaroon_gen = hs.get_macaroon_generator() self._account_validity_handler = hs.get_account_validity_handler() self._user_consent_version = self.hs.config.consent.user_consent_version self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self._server_name = hs.hostname + self._user_types_config = hs.config.user_types self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker @@ -160,7 +158,10 @@ class RegistrationHandler: if not localpart: raise SynapseError(400, "User ID cannot be empty", Codes.INVALID_USERNAME) - if localpart[0] == "_": + if ( + localpart[0] == "_" + and not self.hs.config.registration.allow_underscore_prefixed_localpart + ): raise SynapseError( 400, "User ID may not begin with _", Codes.INVALID_USERNAME ) @@ -304,6 +305,9 @@ class RegistrationHandler: elif default_display_name is None: default_display_name = localpart + if user_type is None: + user_type = self._user_types_config.default_user_type + await self.register_with_store( user_id=user_id, password_hash=password_hash, @@ -380,19 +384,6 @@ class RegistrationHandler: user_id, ) - # Bind any specified emails to this account - current_time = self.hs.get_clock().time_msec() - for email in bind_emails: - # generate threepid dict - threepid_dict = { - "medium": "email", - "address": email, - "validated_at": current_time, - } - - # Bind email to new account - await self._register_email_threepid(user_id, threepid_dict, None) - return user_id async def _create_and_join_rooms(self, user_id: str) -> None: @@ -630,7 +621,9 @@ class RegistrationHandler: """ await self._auto_join_rooms(user_id) - async def appservice_register(self, user_localpart: str, as_token: str) -> str: + async def appservice_register( + self, user_localpart: str, as_token: str + ) -> Tuple[str, ApplicationService]: user = UserID(user_localpart, self.hs.hostname) user_id = user.to_string() service = self.store.get_app_service_by_token(as_token) @@ -653,7 +646,7 @@ class RegistrationHandler: appservice_id=service_id, create_profile_with_displayname=user.localpart, ) - return user_id + return (user_id, service) def check_user_id_not_appservice_exclusive( self, user_id: str, allowed_appservice: Optional[ApplicationService] = None @@ -941,21 +934,6 @@ class RegistrationHandler: ) return - if auth_result and LoginType.EMAIL_IDENTITY in auth_result: - threepid = auth_result[LoginType.EMAIL_IDENTITY] - # Necessary due to auth checks prior to the threepid being - # written to the db - if 
is_threepid_reserved( - self.hs.config.server.mau_limits_reserved_threepids, threepid - ): - await self.store.upsert_monthly_active_user(user_id) - - await self._register_email_threepid(user_id, threepid, access_token) - - if auth_result and LoginType.MSISDN in auth_result: - threepid = auth_result[LoginType.MSISDN] - await self._register_msisdn_threepid(user_id, threepid) - if auth_result and LoginType.TERMS in auth_result: # The terms type should only exist if consent is enabled. assert self._user_consent_version is not None @@ -971,86 +949,3 @@ class RegistrationHandler: logger.info("%s has consented to the privacy policy", user_id) await self.store.user_set_consent_version(user_id, consent_version) await self.post_consent_actions(user_id) - - async def _register_email_threepid( - self, user_id: str, threepid: dict, token: Optional[str] - ) -> None: - """Add an email address as a 3pid identifier - - Also adds an email pusher for the email address, if configured in the - HS config - - Must be called on master. - - Args: - user_id: id of user - threepid: m.login.email.identity auth response - token: access_token for the user, or None if not logged in. - """ - reqd = ("medium", "address", "validated_at") - if any(x not in threepid for x in reqd): - # This will only happen if the ID server returns a malformed response - logger.info("Can't add incomplete 3pid") - return - - await self._auth_handler.add_threepid( - user_id, - threepid["medium"], - threepid["address"], - threepid["validated_at"], - ) - - # And we add an email pusher for them by default, but only - # if email notifications are enabled (so people don't start - # getting mail spam where they weren't before if email - # notifs are set up on a homeserver) - if ( - self.hs.config.email.email_enable_notifs - and self.hs.config.email.email_notif_for_new_users - and token - ): - # Pull the ID of the access token back out of the db - # It would really make more sense for this to be passed - # up when the access token is saved, but that's quite an - # invasive change I'd rather do separately. - user_tuple = await self.store.get_user_by_access_token(token) - # The token better still exist. - assert user_tuple - device_id = user_tuple.device_id - - await self.pusher_pool.add_or_update_pusher( - user_id=user_id, - device_id=device_id, - kind="email", - app_id="m.email", - app_display_name="Email Notifications", - device_display_name=threepid["address"], - pushkey=threepid["address"], - lang=None, - data={}, - ) - - async def _register_msisdn_threepid(self, user_id: str, threepid: dict) -> None: - """Add a phone number as a 3pid identifier - - Must be called on master. - - Args: - user_id: id of user - threepid: m.login.msisdn auth response - """ - try: - assert_params_in_dict(threepid, ["medium", "address", "validated_at"]) - except SynapseError as ex: - if ex.errcode == Codes.MISSING_PARAM: - # This will only happen if the ID server returns a malformed response - logger.info("Can't add incomplete 3pid") - return None - raise - - await self._auth_handler.add_threepid( - user_id, - threepid["medium"], - threepid["address"], - threepid["validated_at"], - ) diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py
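The localpart gate added in the registration hunk above, distilled into a standalone check for illustration (the flag mirrors the new allow_underscore_prefixed_localpart option; the helper itself is not Synapse's):

    def localpart_allowed(localpart: str, allow_underscore_prefixed: bool) -> bool:
        # Empty localparts are always rejected; leading underscores are
        # reserved for appservices unless explicitly allowed.
        if not localpart:
            return False
        if localpart[0] == "_" and not allow_underscore_prefixed:
            return False
        return True

    assert not localpart_allowed("_bridge_bot", allow_underscore_prefixed=False)
    assert localpart_allowed("_bridge_bot", allow_underscore_prefixed=True)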
index efe31e81f9..b1158ee77d 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py
@@ -188,13 +188,13 @@ class RelationsHandler: if include_original_event: # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. - return_value["original_event"] = ( - await self._event_serializer.serialize_event( - event, - now, - bundle_aggregations=None, - config=serialize_options, - ) + return_value[ + "original_event" + ] = await self._event_serializer.serialize_event( + event, + now, + bundle_aggregations=None, + config=serialize_options, ) if next_token: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 2c6e672ede..1ccb6f7171 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py
@@ -20,6 +20,7 @@ # """Contains functions for performing actions on rooms.""" + import itertools import logging import math @@ -467,17 +468,6 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - spam_check = await self._spam_checker_module_callbacks.user_may_create_room( - user_id - ) - if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: - raise SynapseError( - 403, - "You are not permitted to create rooms", - errcode=spam_check[0], - additional_fields=spam_check[1], - ) - creation_content: JsonDict = { "room_version": new_room_version.identifier, "predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id}, @@ -584,6 +574,24 @@ class RoomCreationHandler: if current_power_level_int < needed_power_level: user_power_levels[user_id] = needed_power_level + # We construct what the body of a call to /createRoom would look like for passing + # to the spam checker. We don't include a preset here, as we expect the + # initial state to contain everything we need. + spam_check = await self._spam_checker_module_callbacks.user_may_create_room( + user_id, + { + "creation_content": creation_content, + "initial_state": list(initial_state.items()), + }, + ) + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: + raise SynapseError( + 403, + "You are not permitted to create rooms", + errcode=spam_check[0], + additional_fields=spam_check[1], + ) + await self._send_events_for_new_room( requester, new_room_id, @@ -785,7 +793,7 @@ class RoomCreationHandler: if not is_requester_admin: spam_check = await self._spam_checker_module_callbacks.user_may_create_room( - user_id + user_id, config ) if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise SynapseError( @@ -900,11 +908,9 @@ class RoomCreationHandler: ) # Check whether this visibility value is blocked by a third party module - allowed_by_third_party_rules = ( - await ( - self._third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility - ) + allowed_by_third_party_rules = await ( + self._third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility ) ) if not allowed_by_third_party_rules: @@ -1754,7 +1760,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): ) events = list(room_events) - events.extend(e for evs, _ in room_to_events.values() for e in evs) + events.extend(e for evs, _, _ in room_to_events.values() for e in evs) # We know stream_ordering must be not None here, as its been # persisted, but mypy doesn't know that @@ -1807,7 +1813,7 @@ class RoomShutdownHandler: ] = None, ) -> Optional[ShutdownRoomResponse]: """ - Shuts down a room. Moves all local users and room aliases automatically + Shuts down a room. Moves all joined local users and room aliases automatically to a new room if `new_room_user_id` is set. Otherwise local users only leave the room without any information. 
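With the change above, user_may_create_room callbacks now receive the requested room configuration as well as the creator's user ID. A sketch of a module consuming the two-argument form (the policy itself is invented; NOT_SPAM, Codes, and register_spam_checker_callbacks are the standard module API):

    from synapse.module_api import NOT_SPAM, ModuleApi
    from synapse.module_api.errors import Codes

    class RoomCreationPolicy:
        def __init__(self, config: dict, api: ModuleApi):
            api.register_spam_checker_callbacks(
                user_may_create_room=self.user_may_create_room
            )

        async def user_may_create_room(self, user_id: str, room_config: dict):
            # Illustrative rule: refuse room upgrades (creation_content with
            # a "predecessor") from users outside our own server.
            creation = room_config.get("creation_content", {})
            if "predecessor" in creation and not user_id.endswith(":example.org"):
                return Codes.FORBIDDEN
            return NOT_SPAM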
@@ -1950,16 +1956,17 @@ class RoomShutdownHandler: # Join users to new room if new_room_user_id: - assert new_room_id is not None - await self.room_member_handler.update_membership( - requester=target_requester, - target=target_requester.user, - room_id=new_room_id, - action=Membership.JOIN, - content={}, - ratelimit=False, - require_consent=False, - ) + if membership == Membership.JOIN: + assert new_room_id is not None + await self.room_member_handler.update_membership( + requester=target_requester, + target=target_requester.user, + room_id=new_room_id, + action=Membership.JOIN, + content={}, + ratelimit=False, + require_consent=False, + ) result["kicked_users"].append(user_id) if update_result_fct: diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 51b9772329..a3a7326d94 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py
@@ -53,6 +53,7 @@ from synapse.metrics import event_processing_positions from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.push import ReplicationCopyPusherRestServlet from synapse.storage.databases.main.state_deltas import StateDelta +from synapse.storage.invite_rule import InviteRule from synapse.types import ( JsonDict, Requester, @@ -98,7 +99,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): self.federation_handler = hs.get_federation_handler() self.directory_handler = hs.get_directory_handler() - self.identity_handler = hs.get_identity_handler() self.registration_handler = hs.get_registration_handler() self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() @@ -122,7 +122,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): hs.get_module_api_callbacks().third_party_event_rules ) self._server_notices_mxid = self.config.servernotices.server_notices_mxid - self._enable_lookup = hs.config.registration.enable_3pid_lookup self.allow_per_room_profiles = self.config.server.allow_per_room_profiles self._join_rate_limiter_local = Ratelimiter( @@ -158,6 +157,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): store=self.store, clock=self.clock, cfg=hs.config.ratelimiting.rc_invites_per_room, + ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit, ) # Ratelimiter for invites, keyed by recipient (across all rooms, all @@ -166,6 +166,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): store=self.store, clock=self.clock, cfg=hs.config.ratelimiting.rc_invites_per_user, + ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit, ) # Ratelimiter for invites, keyed by issuer (across all rooms, all @@ -174,6 +175,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): store=self.store, clock=self.clock, cfg=hs.config.ratelimiting.rc_invites_per_issuer, + ratelimit_callbacks=hs.get_module_api_callbacks().ratelimit, ) self._third_party_invite_limiter = Ratelimiter( @@ -912,6 +914,21 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): additional_fields=block_invite_result[1], ) + # check the invitee's configuration and apply rules. Admins on the server can bypass. + if not is_requester_admin: + invite_config = await self.store.get_invite_config_for_user(target_id) + rule = invite_config.get_invite_rule(requester.user.to_string()) + if rule == InviteRule.BLOCK: + logger.info( + f"Automatically rejecting invite from {requester.user} due to the invite filtering rules of {target_id}" + ) + raise SynapseError( + 403, + "You are not permitted to invite this user.", + errcode=Codes.INVITE_BLOCKED, + ) + # InviteRule.IGNORE is handled at the sync layer. + # An empty prev_events list is allowed as long as the auth_event_ids are present if prev_event_ids is not None: return await self._local_membership_update( @@ -1190,6 +1207,26 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): origin_server_ts=origin_server_ts, ) + async def check_for_any_membership_in_room( + self, *, user_id: str, room_id: str + ) -> None: + """ + Check if the user has any membership in the room and raise an error if not. + + Args: + user_id: The user to check. + room_id: The room to check. + + Raises: + AuthError if the user doesn't have any membership in the room. 
+ """ + result = await self.store.get_local_current_membership_for_user_in_room( + user_id=user_id, room_id=room_id + ) + + if result is None or result == (None, None): + raise AuthError(403, f"User {user_id} has no membership in room {room_id}") + async def _should_perform_remote_join( self, user_id: str, @@ -1302,11 +1339,11 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # If this is going to be a local join, additional information must # be included in the event content in order to efficiently validate # the event. - content[EventContentFields.AUTHORISING_USER] = ( - await self.event_auth_handler.get_user_which_could_invite( - room_id, - state_before_join, - ) + content[ + EventContentFields.AUTHORISING_USER + ] = await self.event_auth_handler.get_user_which_could_invite( + room_id, + state_before_join, ) return False, [] @@ -1415,9 +1452,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if requester is not None: sender = UserID.from_string(event.sender) - assert ( - sender == requester.user - ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user) + assert sender == requester.user, ( + "Sender (%s) must be same as requester (%s)" % (sender, requester.user) + ) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) else: requester = types.create_requester(target_user) @@ -1572,230 +1609,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): return UserID.from_string(invite.sender) return None - async def do_3pid_invite( - self, - room_id: str, - inviter: UserID, - medium: str, - address: str, - id_server: str, - requester: Requester, - txn_id: Optional[str], - id_access_token: str, - prev_event_ids: Optional[List[str]] = None, - depth: Optional[int] = None, - ) -> Tuple[str, int]: - """Invite a 3PID to a room. - - Args: - room_id: The room to invite the 3PID to. - inviter: The user sending the invite. - medium: The 3PID's medium. - address: The 3PID's address. - id_server: The identity server to use. - requester: The user making the request. - txn_id: The transaction ID this is part of, or None if this is not - part of a transaction. - id_access_token: Identity server access token. - depth: Override the depth used to order the event in the DAG. - prev_event_ids: The event IDs to use as the prev events - Should normally be set to None, which will cause the depth to be calculated - based on the prev_events. - - Returns: - Tuple of event ID and stream ordering position - - Raises: - ShadowBanError if the requester has been shadow-banned. - """ - if self.config.server.block_non_admin_invites: - is_requester_admin = await self.auth.is_server_admin(requester) - if not is_requester_admin: - raise SynapseError( - 403, "Invites have been disabled on this server", Codes.FORBIDDEN - ) - - if requester.shadow_banned: - # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) - raise ShadowBanError() - - # We need to rate limit *before* we send out any 3PID invites, so we - # can't just rely on the standard ratelimiting of events. 
- await self._third_party_invite_limiter.ratelimit(requester) - - can_invite = await self._third_party_event_rules.check_threepid_can_be_invited( - medium, address, room_id - ) - if not can_invite: - raise SynapseError( - 403, - "This third-party identifier can not be invited in this room", - Codes.FORBIDDEN, - ) - - if not self._enable_lookup: - raise SynapseError( - 403, "Looking up third-party identifiers is denied from this server" - ) - - invitee = await self.identity_handler.lookup_3pid( - id_server, medium, address, id_access_token - ) - - if invitee: - # Note that update_membership with an action of "invite" can raise - # a ShadowBanError, but this was done above already. - # We don't check the invite against the spamchecker(s) here (through - # user_may_invite) because we'll do it further down the line anyway (in - # update_membership_locked). - event_id, stream_id = await self.update_membership( - requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id - ) - else: - # Check if the spamchecker(s) allow this invite to go through. - spam_check = ( - await self._spam_checker_module_callbacks.user_may_send_3pid_invite( - inviter_userid=requester.user.to_string(), - medium=medium, - address=address, - room_id=room_id, - ) - ) - if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: - raise SynapseError( - 403, - "Cannot send threepid invite", - errcode=spam_check[0], - additional_fields=spam_check[1], - ) - - event, stream_id = await self._make_and_store_3pid_invite( - requester, - id_server, - medium, - address, - room_id, - inviter, - txn_id=txn_id, - id_access_token=id_access_token, - prev_event_ids=prev_event_ids, - depth=depth, - ) - event_id = event.event_id - - return event_id, stream_id - - async def _make_and_store_3pid_invite( - self, - requester: Requester, - id_server: str, - medium: str, - address: str, - room_id: str, - user: UserID, - txn_id: Optional[str], - id_access_token: str, - prev_event_ids: Optional[List[str]] = None, - depth: Optional[int] = None, - ) -> Tuple[EventBase, int]: - room_state = await self._storage_controllers.state.get_current_state( - room_id, - StateFilter.from_types( - [ - (EventTypes.Member, user.to_string()), - (EventTypes.CanonicalAlias, ""), - (EventTypes.Name, ""), - (EventTypes.Create, ""), - (EventTypes.JoinRules, ""), - (EventTypes.RoomAvatar, ""), - ] - ), - ) - - inviter_display_name = "" - inviter_avatar_url = "" - member_event = room_state.get((EventTypes.Member, user.to_string())) - if member_event: - inviter_display_name = member_event.content.get("displayname", "") - inviter_avatar_url = member_event.content.get("avatar_url", "") - - # if user has no display name, default to their MXID - if not inviter_display_name: - inviter_display_name = user.to_string() - - canonical_room_alias = "" - canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, "")) - if canonical_alias_event: - canonical_room_alias = canonical_alias_event.content.get("alias", "") - - room_name = "" - room_name_event = room_state.get((EventTypes.Name, "")) - if room_name_event: - room_name = room_name_event.content.get("name", "") - - room_type = None - room_create_event = room_state.get((EventTypes.Create, "")) - if room_create_event: - room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE) - - room_join_rules = "" - join_rules_event = room_state.get((EventTypes.JoinRules, "")) - if join_rules_event: - room_join_rules = join_rules_event.content.get("join_rule", "") - - room_avatar_url = "" - room_avatar_event = 
room_state.get((EventTypes.RoomAvatar, "")) - if room_avatar_event: - room_avatar_url = room_avatar_event.content.get("url", "") - - ( - token, - public_keys, - fallback_public_key, - display_name, - ) = await self.identity_handler.ask_id_server_for_third_party_invite( - requester=requester, - id_server=id_server, - medium=medium, - address=address, - room_id=room_id, - inviter_user_id=user.to_string(), - room_alias=canonical_room_alias, - room_avatar_url=room_avatar_url, - room_join_rules=room_join_rules, - room_name=room_name, - room_type=room_type, - inviter_display_name=inviter_display_name, - inviter_avatar_url=inviter_avatar_url, - id_access_token=id_access_token, - ) - - ( - event, - stream_id, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.ThirdPartyInvite, - "content": { - "display_name": display_name, - "public_keys": public_keys, - # For backwards compatibility: - "key_validity_url": fallback_public_key["key_validity_url"], - "public_key": fallback_public_key["public_key"], - }, - "room_id": room_id, - "sender": user.to_string(), - "state_key": token, - }, - ratelimit=False, - txn_id=txn_id, - prev_event_ids=prev_event_ids, - depth=depth, - ) - return event, stream_id - async def _is_host_in_room(self, partial_current_state_ids: StateMap[str]) -> bool: """Returns whether the homeserver is in the room based on its current state. diff --git a/synapse/handlers/room_policy.py b/synapse/handlers/room_policy.py new file mode 100644
index 0000000000..3a83c4d6ec --- /dev/null +++ b/synapse/handlers/room_policy.py
@@ -0,0 +1,96 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright 2016-2021 The Matrix.org Foundation C.I.C. +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# + +import logging +from typing import TYPE_CHECKING + +from synapse.events import EventBase +from synapse.types.handlers.policy_server import RECOMMENDATION_OK +from synapse.util.stringutils import parse_and_validate_server_name + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class RoomPolicyHandler: + def __init__(self, hs: "HomeServer"): + self._hs = hs + self._store = hs.get_datastores().main + self._storage_controllers = hs.get_storage_controllers() + self._event_auth_handler = hs.get_event_auth_handler() + self._federation_client = hs.get_federation_client() + + async def is_event_allowed(self, event: EventBase) -> bool: + """Check if the given event is allowed in the room by the policy server. + + Note: This will *always* return True if the room's policy server is Synapse + itself. This is because Synapse can't be a policy server (currently). + + If no policy server is configured in the room, this returns True. Similarly, if + the policy server is invalid in any way (not joined, not a server, etc), this + returns True. + + If a valid and contactable policy server is configured in the room, this returns + True if that server suggests the event is not spammy, and False otherwise. + + Args: + event: The event to check. This should be a fully-formed PDU. + + Returns: + bool: True if the event is allowed in the room, False otherwise. + """ + policy_event = await self._storage_controllers.state.get_current_state_event( + event.room_id, "org.matrix.msc4284.policy", "" + ) + if not policy_event: + return True # no policy server == default allow + + policy_server = policy_event.content.get("via", "") + if policy_server is None or not isinstance(policy_server, str): + return True # no policy server == default allow + + if policy_server == self._hs.hostname: + return True # Synapse itself can't be a policy server (currently) + + try: + parse_and_validate_server_name(policy_server) + except ValueError: + return True # invalid policy server == default allow + + is_in_room = await self._event_auth_handler.is_host_in_room( + event.room_id, policy_server + ) + if not is_in_room: + return True # policy server not in room == default allow + + # At this point, the server appears valid and is in the room, so ask it to check + # the event. + recommendation = await self._federation_client.get_pdu_policy_recommendation( + policy_server, event + ) + if recommendation != RECOMMENDATION_OK: + logger.info( + "[POLICY] Policy server %s recommended not to allow event %s in room %s: %s", + policy_server, + event.event_id, + event.room_id, + recommendation, + ) + return False + + return True # default allow diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
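For context, the state event that opts a room into the policy check above looks roughly like the following. The shape is inferred directly from the handler's lookup (state type "org.matrix.msc4284.policy", empty state key, and a "via" server name in the content); the hostname is illustrative:

# Hypothetical example of the room state consulted by RoomPolicyHandler.
policy_state_event = {
    "type": "org.matrix.msc4284.policy",
    "state_key": "",
    "content": {
        # The server asked for a recommendation on each PDU. If this field
        # is missing, malformed, names a server not in the room, or the
        # server is unreachable, the handler falls back to allowing the event.
        "via": "policy.example.com",
    },
}

Note that the handler fails open at every step: only an explicit non-OK recommendation from a valid, joined policy server blocks an event.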
index 720459f1e7..1c39cfed1b 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py
@@ -183,8 +183,13 @@ class RoomSummaryHandler: ) -> JsonDict: """See docstring for SpaceSummaryHandler.get_room_hierarchy.""" - # First of all, check that the room is accessible. - if not await self._is_local_room_accessible(requested_room_id, requester): + # If the room is available locally, quickly check that the user can access it. + local_room = await self._store.is_host_joined( + requested_room_id, self._server_name + ) + if local_room and not await self._is_local_room_accessible( + requested_room_id, requester + ): raise UnstableSpecAuthError( 403, "User %s not in room %s, and room previews are disabled" @@ -192,6 +197,22 @@ class RoomSummaryHandler: errcode=Codes.NOT_JOINED, ) + if not local_room: + room_hierarchy = await self._summarize_remote_room_hierarchy( + _RoomQueueEntry(requested_room_id, ()), + False, + ) + root_room_entry = room_hierarchy[0] + if not root_room_entry or not await self._is_remote_room_accessible( + requester, requested_room_id, root_room_entry.room + ): + raise UnstableSpecAuthError( + 403, + "User %s not in room %s, and room previews are disabled" + % (requester, requested_room_id), + errcode=Codes.NOT_JOINED, + ) + # If this is continuing a previous session, pull the persisted data. if from_token: try: @@ -679,23 +700,55 @@ class RoomSummaryHandler: """ # The API doesn't return the room version so assume that a # join rule of knock is valid. + join_rule = room.get("join_rule") + world_readable = room.get("world_readable") + + logger.warning( + "[EMMA] Checking if room %s is accessible to %s: join_rule=%s, world_readable=%s", + room_id, requester, join_rule, world_readable + ) + if ( - room.get("join_rule") - in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED) - or room.get("world_readable") is True + join_rule in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED) + or world_readable is True ): return True - elif not requester: + else: + logger.warning( + "[EMMA] Room %s is not accessible to %s: join_rule=%s, world_readable=%s, join_rule result=%s, world_readable result=%s", + room_id, requester, join_rule, world_readable, + join_rule in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED), + world_readable is True + ) + + if not requester: + logger.warning( + "[EMMA] No requester, so room %s is not accessible", + room_id + ) return False + # Check if the user is a member of any of the allowed rooms from the response. allowed_rooms = room.get("allowed_room_ids") + logger.warning( + "[EMMA] Checking if requester %s is in the allowed rooms of %s: join_rule=%s, allowed_rooms=%s", + requester, + room_id, + join_rule, + allowed_rooms + ) if allowed_rooms and isinstance(allowed_rooms, list): if await self._event_auth_handler.is_user_in_rooms( allowed_rooms, requester ): return True + logger.warning( + "[EMMA] Checking if room %s is accessible to %s via local state", + room_id, + requester + ) # Finally, check locally if we can access the room. The user might # already be in the room (if it was a child room), or there might be a # pending invite, etc. @@ -863,6 +916,10 @@ class RoomSummaryHandler: if not room_entry or not await self._is_remote_room_accessible( requester, room_entry.room_id, room_entry.room ): + logger.warning( + "[EMMA] Room entry contents: %s", + room_entry.room if room_entry else None + ) raise NotFoundError("Room not found or is not accessible") room = dict(room_entry.room) diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py deleted file mode 100644
index 8ebd3d4ff9..0000000000 --- a/synapse/handlers/saml.py +++ /dev/null
@@ -1,524 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2019 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# -import logging -import re -from typing import TYPE_CHECKING, Callable, Dict, Optional, Set, Tuple - -import attr -import saml2 -import saml2.response -from saml2.client import Saml2Client - -from synapse.api.errors import SynapseError -from synapse.config import ConfigError -from synapse.handlers.sso import MappingException, UserAttributes -from synapse.http.servlet import parse_string -from synapse.http.site import SynapseRequest -from synapse.module_api import ModuleApi -from synapse.types import ( - MXID_LOCALPART_ALLOWED_CHARACTERS, - UserID, - map_username_to_mxid_localpart, -) -from synapse.util.iterutils import chunk_seq - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -@attr.s(slots=True, auto_attribs=True) -class Saml2SessionData: - """Data we track about SAML2 sessions""" - - # time the session was created, in milliseconds - creation_time: int - # The user interactive authentication session ID associated with this SAML - # session (or None if this SAML session is for an initial login). 
- ui_auth_session_id: Optional[str] = None - - -class SamlHandler: - def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self.server_name = hs.hostname - self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config) - self._saml_idp_entityid = hs.config.saml2.saml2_idp_entityid - - self._saml2_session_lifetime = hs.config.saml2.saml2_session_lifetime - self._grandfathered_mxid_source_attribute = ( - hs.config.saml2.saml2_grandfathered_mxid_source_attribute - ) - self._saml2_attribute_requirements = hs.config.saml2.attribute_requirements - - # plugin to do custom mapping from saml response to mxid - self._user_mapping_provider = hs.config.saml2.saml2_user_mapping_provider_class( - hs.config.saml2.saml2_user_mapping_provider_config, - ModuleApi(hs, hs.get_auth_handler()), - ) - - # identifier for the external_ids table - self.idp_id = "saml" - - # user-facing name of this auth provider - self.idp_name = hs.config.saml2.idp_name - - # MXC URI for icon for this auth provider - self.idp_icon = hs.config.saml2.idp_icon - - # optional brand identifier for this auth provider - self.idp_brand = hs.config.saml2.idp_brand - - # a map from saml session id to Saml2SessionData object - self._outstanding_requests_dict: Dict[str, Saml2SessionData] = {} - - self._sso_handler = hs.get_sso_handler() - self._sso_handler.register_identity_provider(self) - - async def handle_redirect_request( - self, - request: SynapseRequest, - client_redirect_url: Optional[bytes], - ui_auth_session_id: Optional[str] = None, - ) -> str: - """Handle an incoming request to /login/sso/redirect - - Args: - request: the incoming HTTP request - client_redirect_url: the URL that we should redirect the - client to after login (or None for UI Auth). - ui_auth_session_id: The session ID of the ongoing UI Auth (or - None if this is a login). - - Returns: - URL to redirect to - """ - if not client_redirect_url: - # Some SAML identity providers (e.g. Google) require a - # RelayState parameter on requests, so pass in a dummy redirect URL - # (which will never get used). - client_redirect_url = b"unused" - - reqid, info = self._saml_client.prepare_for_authenticate( - entityid=self._saml_idp_entityid, relay_state=client_redirect_url - ) - - # Since SAML sessions timeout it is useful to log when they were created. - logger.info("Initiating a new SAML session: %s" % (reqid,)) - - now = self.clock.time_msec() - self._outstanding_requests_dict[reqid] = Saml2SessionData( - creation_time=now, - ui_auth_session_id=ui_auth_session_id, - ) - - for key, value in info["headers"]: - if key == "Location": - return value - - # this shouldn't happen! - raise Exception("prepare_for_authenticate didn't return a Location header") - - async def handle_saml_response(self, request: SynapseRequest) -> None: - """Handle an incoming request to /_synapse/client/saml2/authn_response - - Args: - request: the incoming request from the browser. We'll - respond to it with a redirect. - - Returns: - Completes once we have handled the request. - """ - resp_bytes = parse_string(request, "SAMLResponse", required=True) - relay_state = parse_string(request, "RelayState", required=True) - - # expire outstanding sessions before parse_authn_request_response checks - # the dict. 
- self.expire_sessions() - - try: - saml2_auth = self._saml_client.parse_authn_request_response( - resp_bytes, - saml2.BINDING_HTTP_POST, - outstanding=self._outstanding_requests_dict, - ) - except saml2.response.UnsolicitedResponse as e: - # the pysaml2 library helpfully logs an ERROR here, but neglects to log - # the session ID. I don't really want to put the full text of the exception - # in the (user-visible) exception message, so let's log the exception here - # so we can track down the session IDs later. - logger.warning(str(e)) - self._sso_handler.render_error( - request, "unsolicited_response", "Unexpected SAML2 login." - ) - return - except Exception as e: - self._sso_handler.render_error( - request, - "invalid_response", - "Unable to parse SAML2 response: %s." % (e,), - ) - return - - if saml2_auth.not_signed: - self._sso_handler.render_error( - request, "unsigned_respond", "SAML2 response was not signed." - ) - return - - logger.debug("SAML2 response: %s", saml2_auth.origxml) - - await self._handle_authn_response(request, saml2_auth, relay_state) - - async def _handle_authn_response( - self, - request: SynapseRequest, - saml2_auth: saml2.response.AuthnResponse, - relay_state: str, - ) -> None: - """Handle an AuthnResponse, having parsed it from the request params - - Assumes that the signature on the response object has been checked. Maps - the user onto an MXID, registering them if necessary, and returns a response - to the browser. - - Args: - request: the incoming request from the browser. We'll respond to it with an - HTML page or a redirect - saml2_auth: the parsed AuthnResponse object - relay_state: the RelayState query param, which encodes the URI to rediret - back to - """ - - for assertion in saml2_auth.assertions: - # kibana limits the length of a log field, whereas this is all rather - # useful, so split it up. - count = 0 - for part in chunk_seq(str(assertion), 10000): - logger.info( - "SAML2 assertion: %s%s", "(%i)..." % (count,) if count else "", part - ) - count += 1 - - logger.info("SAML2 mapped attributes: %s", saml2_auth.ava) - - current_session = self._outstanding_requests_dict.pop( - saml2_auth.in_response_to, None - ) - - # first check if we're doing a UIA - if current_session and current_session.ui_auth_session_id: - try: - remote_user_id = self._remote_id_from_saml_response(saml2_auth, None) - except MappingException as e: - logger.exception("Failed to extract remote user id from SAML response") - self._sso_handler.render_error(request, "mapping_error", str(e)) - return - - return await self._sso_handler.complete_sso_ui_auth_request( - self.idp_id, - remote_user_id, - current_session.ui_auth_session_id, - request, - ) - - # otherwise, we're handling a login request. - - # Ensure that the attributes of the logged in user meet the required - # attributes. 
- if not self._sso_handler.check_required_attributes( - request, saml2_auth.ava, self._saml2_attribute_requirements - ): - return - - # Call the mapper to register/login the user - try: - await self._complete_saml_login(saml2_auth, request, relay_state) - except MappingException as e: - logger.exception("Could not map user") - self._sso_handler.render_error(request, "mapping_error", str(e)) - - async def _complete_saml_login( - self, - saml2_auth: saml2.response.AuthnResponse, - request: SynapseRequest, - client_redirect_url: str, - ) -> None: - """ - Given a SAML response, complete the login flow - - Retrieves the remote user ID, registers the user if necessary, and serves - a redirect back to the client with a login-token. - - Args: - saml2_auth: The parsed SAML2 response. - request: The request to respond to - client_redirect_url: The redirect URL passed in by the client. - - Raises: - MappingException if there was a problem mapping the response to a user. - RedirectException: some mapping providers may raise this if they need - to redirect to an interstitial page. - """ - remote_user_id = self._remote_id_from_saml_response( - saml2_auth, client_redirect_url - ) - - async def saml_response_to_remapped_user_attributes( - failures: int, - ) -> UserAttributes: - """ - Call the mapping provider to map a SAML response to user attributes and coerce the result into the standard form. - - This is backwards compatibility for abstraction for the SSO handler. - """ - # Call the mapping provider. - result = self._user_mapping_provider.saml_response_to_user_attributes( - saml2_auth, failures, client_redirect_url - ) - # Remap some of the results. - return UserAttributes( - localpart=result.get("mxid_localpart"), - display_name=result.get("displayname"), - emails=result.get("emails", []), - ) - - async def grandfather_existing_users() -> Optional[str]: - # backwards-compatibility hack: see if there is an existing user with a - # suitable mapping from the uid - if ( - self._grandfathered_mxid_source_attribute - and self._grandfathered_mxid_source_attribute in saml2_auth.ava - ): - attrval = saml2_auth.ava[self._grandfathered_mxid_source_attribute][0] - user_id = UserID( - map_username_to_mxid_localpart(attrval), self.server_name - ).to_string() - - logger.debug( - "Looking for existing account based on mapped %s %s", - self._grandfathered_mxid_source_attribute, - user_id, - ) - - users = await self.store.get_users_by_id_case_insensitive(user_id) - if users: - registered_user_id = list(users.keys())[0] - logger.info("Grandfathering mapping to %s", registered_user_id) - return registered_user_id - - return None - - await self._sso_handler.complete_sso_login_request( - self.idp_id, - remote_user_id, - request, - client_redirect_url, - saml_response_to_remapped_user_attributes, - grandfather_existing_users, - ) - - def _remote_id_from_saml_response( - self, - saml2_auth: saml2.response.AuthnResponse, - client_redirect_url: Optional[str], - ) -> str: - """Extract the unique remote id from a SAML2 AuthnResponse - - Args: - saml2_auth: The parsed SAML2 response. - client_redirect_url: The redirect URL passed in by the client. 
- Returns: - remote user id - - Raises: - MappingException if there was an error extracting the user id - """ - # It's not obvious why we need to pass in the redirect URI to the mapping - # provider, but we do :/ - remote_user_id = self._user_mapping_provider.get_remote_user_id( - saml2_auth, client_redirect_url - ) - - if not remote_user_id: - raise MappingException( - "Failed to extract remote user id from SAML response" - ) - - return remote_user_id - - def expire_sessions(self) -> None: - expire_before = self.clock.time_msec() - self._saml2_session_lifetime - to_expire = set() - for reqid, data in self._outstanding_requests_dict.items(): - if data.creation_time < expire_before: - to_expire.add(reqid) - for reqid in to_expire: - logger.debug("Expiring session id %s", reqid) - del self._outstanding_requests_dict[reqid] - - -DOT_REPLACE_PATTERN = re.compile( - "[^%s]" % (re.escape("".join(MXID_LOCALPART_ALLOWED_CHARACTERS)),) -) - - -def dot_replace_for_mxid(username: str) -> str: - """Replace any characters which are not allowed in Matrix IDs with a dot.""" - username = username.lower() - username = DOT_REPLACE_PATTERN.sub(".", username) - - # regular mxids aren't allowed to start with an underscore either - username = re.sub("^_", "", username) - return username - - -MXID_MAPPER_MAP: Dict[str, Callable[[str], str]] = { - "hexencode": map_username_to_mxid_localpart, - "dotreplace": dot_replace_for_mxid, -} - - -@attr.s(auto_attribs=True) -class SamlConfig: - mxid_source_attribute: str - mxid_mapper: Callable[[str], str] - - -class DefaultSamlMappingProvider: - __version__ = "0.0.1" - - def __init__(self, parsed_config: SamlConfig, module_api: ModuleApi): - """The default SAML user mapping provider - - Args: - parsed_config: Module configuration - module_api: module api proxy - """ - self._mxid_source_attribute = parsed_config.mxid_source_attribute - self._mxid_mapper = parsed_config.mxid_mapper - - self._grandfathered_mxid_source_attribute = ( - module_api._hs.config.saml2.saml2_grandfathered_mxid_source_attribute - ) - - def get_remote_user_id( - self, saml_response: saml2.response.AuthnResponse, client_redirect_url: str - ) -> str: - """Extracts the remote user id from the SAML response""" - try: - return saml_response.ava["uid"][0] - except KeyError: - logger.warning("SAML2 response lacks a 'uid' attestation") - raise MappingException("'uid' not in SAML2 response") - - def saml_response_to_user_attributes( - self, - saml_response: saml2.response.AuthnResponse, - failures: int, - client_redirect_url: str, - ) -> dict: - """Maps some text from a SAML response to attributes of a new user - - Args: - saml_response: A SAML auth response object - - failures: How many times a call to this function with this - saml_response has resulted in a failure - - client_redirect_url: where the client wants to redirect to - - Returns: - A dict containing new user attributes. Possible keys: - * mxid_localpart (str): Required. 
The localpart of the user's mxid - * displayname (str): The displayname of the user - * emails (list[str]): Any emails for the user - """ - try: - mxid_source = saml_response.ava[self._mxid_source_attribute][0] - except KeyError: - logger.warning( - "SAML2 response lacks a '%s' attestation", - self._mxid_source_attribute, - ) - raise SynapseError( - 400, "%s not in SAML2 response" % (self._mxid_source_attribute,) - ) - - # Use the configured mapper for this mxid_source - localpart = self._mxid_mapper(mxid_source) - - # Append suffix integer if last call to this function failed to produce - # a usable mxid. - localpart += str(failures) if failures else "" - - # Retrieve the display name from the saml response - # If displayname is None, the mxid_localpart will be used instead - displayname = saml_response.ava.get("displayName", [None])[0] - - # Retrieve any emails present in the saml response - emails = saml_response.ava.get("email", []) - - return { - "mxid_localpart": localpart, - "displayname": displayname, - "emails": emails, - } - - @staticmethod - def parse_config(config: dict) -> SamlConfig: - """Parse the dict provided by the homeserver's config - Args: - config: A dictionary containing configuration options for this provider - Returns: - A custom config object for this module - """ - # Parse config options and use defaults where necessary - mxid_source_attribute = config.get("mxid_source_attribute", "uid") - mapping_type = config.get("mxid_mapping", "hexencode") - - # Retrieve the associating mapping function - try: - mxid_mapper = MXID_MAPPER_MAP[mapping_type] - except KeyError: - raise ConfigError( - "saml2_config.user_mapping_provider.config: '%s' is not a valid " - "mxid_mapping value" % (mapping_type,) - ) - - return SamlConfig(mxid_source_attribute, mxid_mapper) - - @staticmethod - def get_saml_attributes(config: SamlConfig) -> Tuple[Set[str], Set[str]]: - """Returns the required attributes of a SAML - - Args: - config: A SamlConfig object containing configuration params for this provider - - Returns: - The first set equates to the saml auth response - attributes that are required for the module to function, whereas the - second set consists of those attributes which can be used if - available, but are not necessary - """ - return {"uid", config.mxid_source_attribute}, {"displayName", "email"} diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
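One detail of the SAML code deleted above that is worth a worked example: the "dotreplace" MXID mapper lower-cases the input, replaces every character not allowed in an MXID localpart with a dot, and strips a leading underscore. A standalone re-implementation with a simplified allowed-character set (the real set is MXID_LOCALPART_ALLOWED_CHARACTERS from synapse.types; the constant below is an approximation for illustration):

import re

# Simplified stand-in for MXID_LOCALPART_ALLOWED_CHARACTERS.
ALLOWED = "abcdefghijklmnopqrstuvwxyz0123456789._=-/"
DOT_REPLACE = re.compile("[^%s]" % re.escape(ALLOWED))

def dot_replace_for_mxid(username: str) -> str:
    username = DOT_REPLACE.sub(".", username.lower())
    return re.sub("^_", "", username)  # localparts may not start with "_"

print(dot_replace_for_mxid("_Jane Doe"))  # -> "jane.doe"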
index a7d52fa648..1a71135d5f 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py
@@ -423,9 +423,9 @@ class SearchHandler: } if search_result.room_groups and "room_id" in group_keys: - rooms_cat_res.setdefault("groups", {})[ - "room_id" - ] = search_result.room_groups + rooms_cat_res.setdefault("groups", {})["room_id"] = ( + search_result.room_groups + ) if sender_group and "sender" in group_keys: rooms_cat_res.setdefault("groups", {})["sender"] = sender_group diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py deleted file mode 100644
index 70cdb0721c..0000000000 --- a/synapse/handlers/send_email.py +++ /dev/null
@@ -1,230 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2021 The Matrix.org C.I.C. Foundation -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import email.utils -import logging -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText -from io import BytesIO -from typing import TYPE_CHECKING, Any, Dict, Optional - -from pkg_resources import parse_version - -import twisted -from twisted.internet.defer import Deferred -from twisted.internet.endpoints import HostnameEndpoint -from twisted.internet.interfaces import IOpenSSLContextFactory, IProtocolFactory -from twisted.internet.ssl import optionsForClientTLS -from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory -from twisted.protocols.tls import TLSMemoryBIOFactory - -from synapse.logging.context import make_deferred_yieldable -from synapse.types import ISynapseReactor - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - -_is_old_twisted = parse_version(twisted.__version__) < parse_version("21") - - -class _NoTLSESMTPSender(ESMTPSender): - """Extend ESMTPSender to disable TLS - - Unfortunately, before Twisted 21.2, ESMTPSender doesn't give an easy way to disable - TLS, so we override its internal method which it uses to generate a context factory. - """ - - def _getContextFactory(self) -> Optional[IOpenSSLContextFactory]: - return None - - -async def _sendmail( - reactor: ISynapseReactor, - smtphost: str, - smtpport: int, - from_addr: str, - to_addr: str, - msg_bytes: bytes, - username: Optional[bytes] = None, - password: Optional[bytes] = None, - require_auth: bool = False, - require_tls: bool = False, - enable_tls: bool = True, - force_tls: bool = False, -) -> None: - """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests - - Params: - reactor: reactor to use to make the outbound connection - smtphost: hostname to connect to - smtpport: port to connect to - from_addr: "From" address for email - to_addr: "To" address for email - msg_bytes: Message content - username: username to authenticate with, if auth is enabled - password: password to give when authenticating - require_auth: if auth is not offered, fail the request - require_tls: if TLS is not offered, fail the reqest - enable_tls: True to enable STARTTLS. If this is False and require_tls is True, - the request will fail. - force_tls: True to enable Implicit TLS. 
- """ - msg = BytesIO(msg_bytes) - d: "Deferred[object]" = Deferred() - - def build_sender_factory(**kwargs: Any) -> ESMTPSenderFactory: - return ESMTPSenderFactory( - username, - password, - from_addr, - to_addr, - msg, - d, - heloFallback=True, - requireAuthentication=require_auth, - requireTransportSecurity=require_tls, - **kwargs, - ) - - factory: IProtocolFactory - if _is_old_twisted: - # before twisted 21.2, we have to override the ESMTPSender protocol to disable - # TLS - factory = build_sender_factory() - - if not enable_tls: - factory.protocol = _NoTLSESMTPSender - else: - # for twisted 21.2 and later, there is a 'hostname' parameter which we should - # set to enable TLS. - factory = build_sender_factory(hostname=smtphost if enable_tls else None) - - if force_tls: - factory = TLSMemoryBIOFactory(optionsForClientTLS(smtphost), True, factory) - - endpoint = HostnameEndpoint( - reactor, smtphost, smtpport, timeout=30, bindAddress=None - ) - - await make_deferred_yieldable(endpoint.connect(factory)) - - await make_deferred_yieldable(d) - - -class SendEmailHandler: - def __init__(self, hs: "HomeServer"): - self.hs = hs - - self._reactor = hs.get_reactor() - - self._from = hs.config.email.email_notif_from - self._smtp_host = hs.config.email.email_smtp_host - self._smtp_port = hs.config.email.email_smtp_port - - user = hs.config.email.email_smtp_user - self._smtp_user = user.encode("utf-8") if user is not None else None - passwd = hs.config.email.email_smtp_pass - self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None - self._require_transport_security = hs.config.email.require_transport_security - self._enable_tls = hs.config.email.enable_smtp_tls - self._force_tls = hs.config.email.force_tls - - self._sendmail = _sendmail - - async def send_email( - self, - email_address: str, - subject: str, - app_name: str, - html: str, - text: str, - additional_headers: Optional[Dict[str, str]] = None, - ) -> None: - """Send a multipart email with the given information. - - Args: - email_address: The address to send the email to. - subject: The email's subject. - app_name: The app name to include in the From header. - html: The HTML content to include in the email. - text: The plain text content to include in the email. - additional_headers: A map of additional headers to include. - """ - try: - from_string = self._from % {"app": app_name} - except (KeyError, TypeError): - from_string = self._from - - raw_from = email.utils.parseaddr(from_string)[1] - raw_to = email.utils.parseaddr(email_address)[1] - - if raw_to == "": - raise RuntimeError("Invalid 'to' address") - - html_part = MIMEText(html, "html", "utf-8") - text_part = MIMEText(text, "plain", "utf-8") - - multipart_msg = MIMEMultipart("alternative") - multipart_msg["Subject"] = subject - multipart_msg["From"] = from_string - multipart_msg["To"] = email_address - multipart_msg["Date"] = email.utils.formatdate() - multipart_msg["Message-ID"] = email.utils.make_msgid() - - # Discourage automatic responses to Synapse's emails. - # Per RFC 3834, automatic responses should not be sent if the "Auto-Submitted" - # header is present with any value other than "no". 
See - # https://www.rfc-editor.org/rfc/rfc3834.html#section-5.1 - multipart_msg["Auto-Submitted"] = "auto-generated" - # Also include a Microsoft-Exchange specific header: - # https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxcmail/ced68690-498a-4567-9d14-5c01f974d8b1 - # which suggests it can take the value "All" to "suppress all auto-replies", - # or a comma separated list of auto-reply classes to suppress. - # The following stack overflow question has a little more context: - # https://stackoverflow.com/a/25324691/5252017 - # https://stackoverflow.com/a/61646381/5252017 - multipart_msg["X-Auto-Response-Suppress"] = "All" - - if additional_headers: - for header, value in additional_headers.items(): - multipart_msg[header] = value - - multipart_msg.attach(text_part) - multipart_msg.attach(html_part) - - logger.info("Sending email to %s" % email_address) - - await self._sendmail( - self._reactor, - self._smtp_host, - self._smtp_port, - raw_from, - raw_to, - multipart_msg.as_string().encode("utf8"), - username=self._smtp_user, - password=self._smtp_pass, - require_auth=self._smtp_user is not None, - require_tls=self._require_transport_security, - enable_tls=self._enable_tls, - force_tls=self._force_tls, - ) diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
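The two auto-response-suppression headers explained in the deleted send_email code above are easy to reproduce with the standard library alone; a minimal sketch of the same multipart construction:

import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart("alternative")
msg["Subject"] = "Notification"
msg["From"] = "Example <noreply@example.com>"
msg["To"] = "user@example.com"
msg["Date"] = email.utils.formatdate()
msg["Message-ID"] = email.utils.make_msgid()
# RFC 3834: mark the mail as auto-generated so well-behaved agents
# don't send vacation replies back to it.
msg["Auto-Submitted"] = "auto-generated"
# Exchange-specific equivalent: "All" suppresses all auto-replies.
msg["X-Auto-Response-Suppress"] = "All"
msg.attach(MIMEText("plain text body", "plain", "utf-8"))
msg.attach(MIMEText("<b>html body</b>", "html", "utf-8"))
print(msg.as_string())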
index 29cc03d71d..94301add9e 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py
@@ -36,10 +36,17 @@ class SetPasswordHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self._auth_handler = hs.get_auth_handler() - # This can only be instantiated on the main process. - device_handler = hs.get_device_handler() - assert isinstance(device_handler, DeviceHandler) - self._device_handler = device_handler + + # We don't need the device handler if password changing is disabled. + # This allows us to instantiate the SetPasswordHandler on the workers + # that serve the admin APIs for MAS. + if self._auth_handler.can_change_password(): + # This can only be instantiated on the main process. + device_handler = hs.get_device_handler() + assert isinstance(device_handler, DeviceHandler) + self._device_handler: Optional[DeviceHandler] = device_handler + else: + self._device_handler = None async def set_password( self, @@ -51,6 +58,9 @@ class SetPasswordHandler: if not self._auth_handler.can_change_password(): raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN) + # The device handler is only set when password changing is enabled, + # which was checked above. + assert self._device_handler is not None + try: await self.store.user_set_password_hash(user_id, password_hash) except StoreError as e: diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py deleted file mode 100644
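The pattern in the set_password.py hunk above — acquiring a main-process-only dependency only when the feature that needs it is enabled, then asserting on it at the point of use — is what lets the handler be constructed on workers. A minimal sketch of the same pattern in isolation (all names here are illustrative, not Synapse APIs):

from typing import Optional


class FeatureHandler:
    def __init__(self, feature_enabled: bool):
        self._feature_enabled = feature_enabled
        # Only grab the restricted dependency when it can actually be
        # needed, so this class can also be instantiated on processes
        # where the dependency is unavailable.
        self._dep: Optional[str] = "main-process-dependency" if feature_enabled else None

    def use_feature(self) -> None:
        if not self._feature_enabled:
            raise RuntimeError("feature disabled")
        # Guarded by the check above: the assert documents the invariant
        # for type checkers rather than guarding at runtime.
        assert self._dep is not None
        print("using", self._dep)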
index 18a96843be..0000000000 --- a/synapse/handlers/sliding_sync.py +++ /dev/null
@@ -1,3158 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2024 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# -import enum -import logging -from enum import Enum -from itertools import chain -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Final, - List, - Literal, - Mapping, - Optional, - Sequence, - Set, - Tuple, - Union, -) - -import attr -from immutabledict import immutabledict -from typing_extensions import assert_never - -from synapse.api.constants import ( - AccountDataTypes, - Direction, - EventContentFields, - EventTypes, - Membership, -) -from synapse.api.errors import SlidingSyncUnknownPosition -from synapse.events import EventBase, StrippedStateEvent -from synapse.events.utils import parse_stripped_state_event, strip_event -from synapse.handlers.relations import BundledAggregations -from synapse.logging.opentracing import ( - SynapseTags, - log_kv, - set_tag, - start_active_span, - tag_args, - trace, -) -from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary -from synapse.storage.databases.main.state import ( - ROOM_UNKNOWN_SENTINEL, - Sentinel as StateSentinel, -) -from synapse.storage.databases.main.stream import ( - CurrentStateDeltaMembership, - PaginateFunction, -) -from synapse.storage.roommember import MemberSummary -from synapse.types import ( - DeviceListUpdates, - JsonDict, - JsonMapping, - MultiWriterStreamToken, - MutableStateMap, - PersistedEventPosition, - Requester, - RoomStreamToken, - SlidingSyncStreamToken, - StateMap, - StrCollection, - StreamKeyType, - StreamToken, - UserID, -) -from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult -from synapse.types.state import StateFilter -from synapse.util.async_helpers import concurrently_execute -from synapse.visibility import filter_events_for_client - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class Sentinel(enum.Enum): - # defining a sentinel in this way allows mypy to correctly handle the - # type of a dictionary lookup and subsequent type narrowing. - UNSET_SENTINEL = object() - - -# The event types that clients should consider as new activity. -DEFAULT_BUMP_EVENT_TYPES = { - EventTypes.Create, - EventTypes.Message, - EventTypes.Encrypted, - EventTypes.Sticker, - EventTypes.CallInvite, - EventTypes.PollStart, - EventTypes.LiveLocationShareStart, -} - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _RoomMembershipForUser: - """ - Attributes: - room_id: The room ID of the membership event - event_id: The event ID of the membership event - event_pos: The stream position of the membership event - membership: The membership state of the user in the room - sender: The person who sent the membership event - newly_joined: Whether the user newly joined the room during the given token - range and is still joined to the room at the end of this range. 
- newly_left: Whether the user newly left (or kicked) the room during the given - token range and is still "leave" at the end of this range. - is_dm: Whether this user considers this room as a direct-message (DM) room - """ - - room_id: str - # Optional because state resets can affect room membership without a corresponding event. - event_id: Optional[str] - # Even during a state reset which removes the user from the room, we expect this to - # be set because `current_state_delta_stream` will note the position that the reset - # happened. - event_pos: PersistedEventPosition - # Even during a state reset which removes the user from the room, we expect this to - # be set to `LEAVE` because we can make that assumption based on the situaton (see - # `get_current_state_delta_membership_changes_for_user(...)`) - membership: str - # Optional because state resets can affect room membership without a corresponding event. - sender: Optional[str] - newly_joined: bool - newly_left: bool - is_dm: bool - - def copy_and_replace(self, **kwds: Any) -> "_RoomMembershipForUser": - return attr.evolve(self, **kwds) - - -def filter_membership_for_sync( - *, user_id: str, room_membership_for_user: _RoomMembershipForUser -) -> bool: - """ - Returns True if the membership event should be included in the sync response, - otherwise False. - - Attributes: - user_id: The user ID that the membership applies to - room_membership_for_user: Membership information for the user in the room - """ - - membership = room_membership_for_user.membership - sender = room_membership_for_user.sender - newly_left = room_membership_for_user.newly_left - - # We want to allow everything except rooms the user has left unless `newly_left` - # because we want everything that's *still* relevant to the user. We include - # `newly_left` rooms because the last event that the user should see is their own - # leave event. - # - # A leave != kick. This logic includes kicks (leave events where the sender is not - # the same user). - # - # When `sender=None`, it means that a state reset happened that removed the user - # from the room without a corresponding leave event. We can just remove the rooms - # since they are no longer relevant to the user but will still appear if they are - # `newly_left`. - return ( - # Anything except leave events - membership != Membership.LEAVE - # Unless... - or newly_left - # Allow kicks - or (membership == Membership.LEAVE and sender not in (user_id, None)) - ) - - -# We can't freeze this class because we want to update it in place with the -# de-duplicated data. -@attr.s(slots=True, auto_attribs=True) -class RoomSyncConfig: - """ - Holds the config for what data we should fetch for a room in the sync response. - - Attributes: - timeline_limit: The maximum number of events to return in the timeline. - - required_state_map: Map from state event type to state_keys requested for the - room. The values are close to `StateKey` but actually use a syntax where you - can provide `*` wildcard and `$LAZY` for lazy-loading room members. - """ - - timeline_limit: int - required_state_map: Dict[str, Set[str]] - - @classmethod - def from_room_config( - cls, - room_params: SlidingSyncConfig.CommonRoomParameters, - ) -> "RoomSyncConfig": - """ - Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config. 
- - Args: - room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription` - """ - required_state_map: Dict[str, Set[str]] = {} - for ( - state_type, - state_key, - ) in room_params.required_state: - # If we already have a wildcard for this specific `state_key`, we don't need - # to add it since the wildcard already covers it. - if state_key in required_state_map.get(StateValues.WILDCARD, set()): - continue - - # If we already have a wildcard `state_key` for this `state_type`, we don't need - # to add anything else - if StateValues.WILDCARD in required_state_map.get(state_type, set()): - continue - - # If we're getting wildcards for the `state_type` and `state_key`, that's - # all that matters so get rid of any other entries - if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD: - required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}} - # We can break, since we don't need to add anything else - break - - # If we're getting a wildcard for the `state_type`, get rid of any other - # entries with the same `state_key`, since the wildcard will cover it already. - elif state_type == StateValues.WILDCARD: - # Get rid of any entries that match the `state_key` - # - # Make a copy so we don't run into an error: `dictionary changed size - # during iteration`, when we remove items - for ( - existing_state_type, - existing_state_key_set, - ) in list(required_state_map.items()): - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for existing_state_key in existing_state_key_set.copy(): - if existing_state_key == state_key: - existing_state_key_set.remove(state_key) - - # If we've the left the `set()` empty, remove it from the map - if existing_state_key_set == set(): - required_state_map.pop(existing_state_type, None) - - # If we're getting a wildcard `state_key`, get rid of any other state_keys - # for this `state_type` since the wildcard will cover it already. - if state_key == StateValues.WILDCARD: - required_state_map[state_type] = {state_key} - # Otherwise, just add it to the set - else: - if required_state_map.get(state_type) is None: - required_state_map[state_type] = {state_key} - else: - required_state_map[state_type].add(state_key) - - return cls( - timeline_limit=room_params.timeline_limit, - required_state_map=required_state_map, - ) - - def deep_copy(self) -> "RoomSyncConfig": - required_state_map: Dict[str, Set[str]] = { - state_type: state_key_set.copy() - for state_type, state_key_set in self.required_state_map.items() - } - - return RoomSyncConfig( - timeline_limit=self.timeline_limit, - required_state_map=required_state_map, - ) - - def combine_room_sync_config( - self, other_room_sync_config: "RoomSyncConfig" - ) -> None: - """ - Combine this `RoomSyncConfig` with another `RoomSyncConfig` and take the - superset union of the two. 
- """ - # Take the highest timeline limit - if self.timeline_limit < other_room_sync_config.timeline_limit: - self.timeline_limit = other_room_sync_config.timeline_limit - - # Union the required state - for ( - state_type, - state_key_set, - ) in other_room_sync_config.required_state_map.items(): - # If we already have a wildcard for everything, we don't need to add - # anything else - if StateValues.WILDCARD in self.required_state_map.get( - StateValues.WILDCARD, set() - ): - break - - # If we already have a wildcard `state_key` for this `state_type`, we don't need - # to add anything else - if StateValues.WILDCARD in self.required_state_map.get(state_type, set()): - continue - - # If we're getting wildcards for the `state_type` and `state_key`, that's - # all that matters so get rid of any other entries - if ( - state_type == StateValues.WILDCARD - and StateValues.WILDCARD in state_key_set - ): - self.required_state_map = {state_type: {StateValues.WILDCARD}} - # We can break, since we don't need to add anything else - break - - for state_key in state_key_set: - # If we already have a wildcard for this specific `state_key`, we don't need - # to add it since the wildcard already covers it. - if state_key in self.required_state_map.get( - StateValues.WILDCARD, set() - ): - continue - - # If we're getting a wildcard for the `state_type`, get rid of any other - # entries with the same `state_key`, since the wildcard will cover it already. - if state_type == StateValues.WILDCARD: - # Get rid of any entries that match the `state_key` - # - # Make a copy so we don't run into an error: `dictionary changed size - # during iteration`, when we remove items - for existing_state_type, existing_state_key_set in list( - self.required_state_map.items() - ): - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for existing_state_key in existing_state_key_set.copy(): - if existing_state_key == state_key: - existing_state_key_set.remove(state_key) - - # If we've the left the `set()` empty, remove it from the map - if existing_state_key_set == set(): - self.required_state_map.pop(existing_state_type, None) - - # If we're getting a wildcard `state_key`, get rid of any other state_keys - # for this `state_type` since the wildcard will cover it already. - if state_key == StateValues.WILDCARD: - self.required_state_map[state_type] = {state_key} - break - # Otherwise, just add it to the set - else: - if self.required_state_map.get(state_type) is None: - self.required_state_map[state_type] = {state_key} - else: - self.required_state_map[state_type].add(state_key) - - -class StateValues: - """ - Understood values of the (type, state_key) tuple in `required_state`. - """ - - # Include all state events of the given type - WILDCARD: Final = "*" - # Lazy-load room membership events (include room membership events for any event - # `sender` in the timeline). We only give special meaning to this value when it's a - # `state_key`. - LAZY: Final = "$LAZY" - # Subsitute with the requester's user ID. Typically used by clients to get - # the user's membership. 
- ME: Final = "$ME" - - -class SlidingSyncHandler: - def __init__(self, hs: "HomeServer"): - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - self.storage_controllers = hs.get_storage_controllers() - self.auth_blocking = hs.get_auth_blocking() - self.notifier = hs.get_notifier() - self.event_sources = hs.get_event_sources() - self.relations_handler = hs.get_relations_handler() - self.device_handler = hs.get_device_handler() - self.push_rules_handler = hs.get_push_rules_handler() - self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync - - self.connection_store = SlidingSyncConnectionStore() - - async def wait_for_sync_for_user( - self, - requester: Requester, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken] = None, - timeout_ms: int = 0, - ) -> SlidingSyncResult: - """ - Get the sync for a client if we have new data for it now. Otherwise - wait for new data to arrive on the server. If the timeout expires, then - return an empty sync result. - - Args: - requester: The user making the request - sync_config: Sync configuration - from_token: The point in the stream to sync from. Token of the end of the - previous batch. May be `None` if this is the initial sync request. - timeout_ms: The time in milliseconds to wait for new data to arrive. If 0, - we will immediately but there might not be any new data so we just return an - empty response. - """ - # If the user is not part of the mau group, then check that limits have - # not been exceeded (if not part of the group by this point, almost certain - # auth_blocking will occur) - await self.auth_blocking.check_auth_blocking(requester=requester) - - # If we're working with a user-provided token, we need to make sure to wait for - # this worker to catch up with the token so we don't skip past any incoming - # events or future events if the user is nefariously, manually modifying the - # token. - if from_token is not None: - # We need to make sure this worker has caught up with the token. If - # this returns false, it means we timed out waiting, and we should - # just return an empty response. - before_wait_ts = self.clock.time_msec() - if not await self.notifier.wait_for_stream_token(from_token.stream_token): - logger.warning( - "Timed out waiting for worker to catch up. Returning empty response" - ) - return SlidingSyncResult.empty(from_token) - - # If we've spent significant time waiting to catch up, take it off - # the timeout. - after_wait_ts = self.clock.time_msec() - if after_wait_ts - before_wait_ts > 1_000: - timeout_ms -= after_wait_ts - before_wait_ts - timeout_ms = max(timeout_ms, 0) - - # We're going to respond immediately if the timeout is 0 or if this is an - # initial sync (without a `from_token`) so we can avoid calling - # `notifier.wait_for_events()`. - if timeout_ms == 0 or from_token is None: - now_token = self.event_sources.get_current_token() - result = await self.current_sync_for_user( - sync_config, - from_token=from_token, - to_token=now_token, - ) - else: - # Otherwise, we wait for something to happen and report it to the user. 
-        # We're going to respond immediately if the timeout is 0 or if this is an
-        # initial sync (without a `from_token`) so we can avoid calling
-        # `notifier.wait_for_events()`.
-        if timeout_ms == 0 or from_token is None:
-            now_token = self.event_sources.get_current_token()
-            result = await self.current_sync_for_user(
-                sync_config,
-                from_token=from_token,
-                to_token=now_token,
-            )
-        else:
-            # Otherwise, we wait for something to happen and report it to the user.
-            async def current_sync_callback(
-                before_token: StreamToken, after_token: StreamToken
-            ) -> SlidingSyncResult:
-                return await self.current_sync_for_user(
-                    sync_config,
-                    from_token=from_token,
-                    to_token=after_token,
-                )
-
-            result = await self.notifier.wait_for_events(
-                sync_config.user.to_string(),
-                timeout_ms,
-                current_sync_callback,
-                from_token=from_token.stream_token,
-            )
-
-        return result
-    @trace
-    async def current_sync_for_user(
-        self,
-        sync_config: SlidingSyncConfig,
-        to_token: StreamToken,
-        from_token: Optional[SlidingSyncStreamToken] = None,
-    ) -> SlidingSyncResult:
-        """
-        Generates the response body of a Sliding Sync result, represented as a
-        `SlidingSyncResult`.
-
-        We fetch data according to the token range (> `from_token` and <= `to_token`).
-
-        Args:
-            sync_config: Sync configuration
-            to_token: The point in the stream to sync up to.
-            from_token: The point in the stream to sync from. Token of the end of the
-                previous batch. May be `None` if this is the initial sync request.
-        """
-        user_id = sync_config.user.to_string()
-        app_service = self.store.get_app_service_by_user_id(user_id)
-        if app_service:
-            # We no longer support AS users using /sync directly.
-            # See https://github.com/matrix-org/matrix-doc/issues/1144
-            raise NotImplementedError()
-
-        if from_token:
-            # Check that we recognize the connection position; if not, tell the
-            # client that it needs to start again.
-            #
-            # If we don't do this and the client asks for the full range of
-            # rooms, we end up sending down all rooms and their state from
-            # scratch (which can be very slow). By expiring the connection we
-            # allow the client a chance to do an initial request with a smaller
-            # range of rooms to get them some results sooner but will end up
-            # taking the same amount of time (more with round-trips and
-            # re-processing) in the end to get everything again.
-            if not await self.connection_store.is_valid_token(
-                sync_config, from_token.connection_position
-            ):
-                raise SlidingSyncUnknownPosition()
-
-            await self.connection_store.mark_token_seen(
-                sync_config=sync_config,
-                from_token=from_token,
-            )
-
-        # Get all of the room IDs that the user should be able to see in the sync
-        # response
-        has_lists = sync_config.lists is not None and len(sync_config.lists) > 0
-        has_room_subscriptions = (
-            sync_config.room_subscriptions is not None
-            and len(sync_config.room_subscriptions) > 0
-        )
-        if has_lists or has_room_subscriptions:
-            room_membership_for_user_map = (
-                await self.get_room_membership_for_user_at_to_token(
-                    user=sync_config.user,
-                    to_token=to_token,
-                    from_token=from_token.stream_token if from_token else None,
-                )
-            )
-
-        # Assemble sliding window lists
-        lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
-        # Keep track of the rooms that we can display and need to fetch more info
-        # about
-        relevant_room_map: Dict[str, RoomSyncConfig] = {}
-        # The set of room IDs of all rooms that could appear in any list. These
-        # include rooms that are outside the list ranges.
-        all_rooms: Set[str] = set()
-        if has_lists and sync_config.lists is not None:
-            with start_active_span("assemble_sliding_window_lists"):
-                sync_room_map = await self.filter_rooms_relevant_for_sync(
-                    user=sync_config.user,
-                    room_membership_for_user_map=room_membership_for_user_map,
-                )
-
-                for list_key, list_config in sync_config.lists.items():
-                    # Apply filters
-                    filtered_sync_room_map = sync_room_map
-                    if list_config.filters is not None:
-                        filtered_sync_room_map = await self.filter_rooms(
-                            sync_config.user,
-                            sync_room_map,
-                            list_config.filters,
-                            to_token,
-                        )
-
-                    # Find which rooms are partially stated and may need to be
-                    # filtered out depending on the `required_state` requested (see
-                    # below).
-                    partial_state_room_map = (
-                        await self.store.is_partial_state_room_batched(
-                            filtered_sync_room_map.keys()
-                        )
-                    )
-
-                    # Since creating the `RoomSyncConfig` takes some work, let's just
-                    # do it once and make a copy whenever we need it.
-                    room_sync_config = RoomSyncConfig.from_room_config(list_config)
-                    membership_state_keys = room_sync_config.required_state_map.get(
-                        EventTypes.Member
-                    )
-                    # Also see `StateFilter.must_await_full_state(...)` for comparison
-                    lazy_loading = (
-                        membership_state_keys is not None
-                        and StateValues.LAZY in membership_state_keys
-                    )
-
-                    if not lazy_loading:
-                        # Exclude partially-stated rooms unless the `required_state`
-                        # only has `["m.room.member", "$LAZY"]` for membership
-                        # (lazy-loading room members).
-                        filtered_sync_room_map = {
-                            room_id: room
-                            for room_id, room in filtered_sync_room_map.items()
-                            if not partial_state_room_map.get(room_id)
-                        }
-
-                    all_rooms.update(filtered_sync_room_map)
-
-                    # Sort the list
-                    sorted_room_info = await self.sort_rooms(
-                        filtered_sync_room_map, to_token
-                    )
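# Editor's note -- illustrative sketch only, not part of the diff: the
# sliding-window ranges consumed in the loop below are inclusive on both
# ends, hence the `+ 1` when sizing the window.
from typing import List, Tuple

def slice_inclusive(sorted_room_ids: List[str], range_: Tuple[int, int]) -> List[str]:
    """Return the rooms covered by an inclusive [start, end] list range."""
    start, end = range_
    max_num_rooms = end - start + 1  # both bounds are inclusive
    return sorted_room_ids[start : start + max_num_rooms]

# A range of (0, 9) asks for ten rooms, not nine.
assert len(slice_inclusive([f"!r{i}:test" for i in range(20)], (0, 9))) == 10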
-                    ops: List[SlidingSyncResult.SlidingWindowList.Operation] = []
-                    if list_config.ranges:
-                        for range in list_config.ranges:
-                            room_ids_in_list: List[str] = []
-
-                            # We're going to loop through the sorted list of rooms
-                            # starting at the range start index and keep adding rooms
-                            # until we fill up the range or run out of rooms.
-                            #
-                            # Both sides of range are inclusive so we `+ 1`
-                            max_num_rooms = range[1] - range[0] + 1
-                            for room_membership in sorted_room_info[range[0] :]:
-                                room_id = room_membership.room_id
-
-                                if len(room_ids_in_list) >= max_num_rooms:
-                                    break
-
-                                # Take the superset of the `RoomSyncConfig` for each
-                                # room.
-                                #
-                                # Update our `relevant_room_map` with the room we're
-                                # going to display and need to fetch more info about.
-                                existing_room_sync_config = relevant_room_map.get(
-                                    room_id
-                                )
-                                if existing_room_sync_config is not None:
-                                    existing_room_sync_config.combine_room_sync_config(
-                                        room_sync_config
-                                    )
-                                else:
-                                    # Make a copy so if we modify it later, it doesn't
-                                    # affect all references.
-                                    relevant_room_map[room_id] = (
-                                        room_sync_config.deep_copy()
-                                    )
-
-                                room_ids_in_list.append(room_id)
-
-                            ops.append(
-                                SlidingSyncResult.SlidingWindowList.Operation(
-                                    op=OperationType.SYNC,
-                                    range=range,
-                                    room_ids=room_ids_in_list,
-                                )
-                            )
-
-                    lists[list_key] = SlidingSyncResult.SlidingWindowList(
-                        count=len(sorted_room_info),
-                        ops=ops,
-                    )
-
-        # Handle room subscriptions
-        if has_room_subscriptions and sync_config.room_subscriptions is not None:
-            with start_active_span("assemble_room_subscriptions"):
-                for (
-                    room_id,
-                    room_subscription,
-                ) in sync_config.room_subscriptions.items():
-                    room_membership_for_user_at_to_token = (
-                        await self.check_room_subscription_allowed_for_user(
-                            room_id=room_id,
-                            room_membership_for_user_map=room_membership_for_user_map,
-                            to_token=to_token,
-                        )
-                    )
-
-                    # Skip this room if the user isn't allowed to see it
-                    if not room_membership_for_user_at_to_token:
-                        continue
-
-                    all_rooms.add(room_id)
-
-                    room_membership_for_user_map[room_id] = (
-                        room_membership_for_user_at_to_token
-                    )
-
-                    # Take the superset of the `RoomSyncConfig` for each room.
-                    #
-                    # Update our `relevant_room_map` with the room we're going to
-                    # display and need to fetch more info about.
-                    room_sync_config = RoomSyncConfig.from_room_config(
-                        room_subscription
-                    )
-                    existing_room_sync_config = relevant_room_map.get(room_id)
-                    if existing_room_sync_config is not None:
-                        existing_room_sync_config.combine_room_sync_config(
-                            room_sync_config
-                        )
-                    else:
-                        relevant_room_map[room_id] = room_sync_config
-        # Fetch room data
-        rooms: Dict[str, SlidingSyncResult.RoomResult] = {}
-
-        # Filter out rooms that haven't received updates and we've sent down
-        # previously.
-        # Keep track of the rooms that we're going to display and need to fetch more
-        # info about
-        relevant_rooms_to_send_map = relevant_room_map
-        with start_active_span("filter_relevant_rooms_to_send"):
-            if from_token:
-                rooms_should_send = set()
-
-                # First we check if there are rooms that match a list/room
-                # subscription and have updates we need to send (i.e. either because
-                # we haven't sent the room down, or we have but there are missing
-                # updates).
-                for room_id in relevant_room_map:
-                    status = await self.connection_store.have_sent_room(
-                        sync_config,
-                        from_token.connection_position,
-                        room_id,
-                    )
-                    if (
-                        # The room was never sent down before so the client needs to
-                        # know about it regardless of any updates.
-                        status.status == HaveSentRoomFlag.NEVER
-                        # `PREVIOUSLY` literally means the "room was sent down before
-                        # *AND* there are updates we haven't sent down" so we already
-                        # know this room has updates.
-                        or status.status == HaveSentRoomFlag.PREVIOUSLY
-                    ):
-                        rooms_should_send.add(room_id)
-                    elif status.status == HaveSentRoomFlag.LIVE:
-                        # We know that we've sent all updates up until `from_token`,
-                        # so we just need to check if there have been updates since
-                        # then.
-                        pass
-                    else:
-                        assert_never(status.status)
-
-                # We only need to check for new events since any state changes
-                # will also come down as new events.
-                rooms_that_have_updates = self.store.get_rooms_that_might_have_updates(
-                    relevant_room_map.keys(), from_token.stream_token.room_key
-                )
-                rooms_should_send.update(rooms_that_have_updates)
-                relevant_rooms_to_send_map = {
-                    room_id: room_sync_config
-                    for room_id, room_sync_config in relevant_room_map.items()
-                    if room_id in rooms_should_send
-                }
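# Editor's note -- illustrative sketch only, not part of the diff: the
# per-room decision above reduced to a pure function. The local
# `HaveSentRoomFlag` mirrors the three-state flag used by the connection
# store (NEVER / PREVIOUSLY / LIVE).
import enum

class HaveSentRoomFlag(enum.Enum):
    NEVER = enum.auto()       # never sent down this connection
    PREVIOUSLY = enum.auto()  # sent before, but with known missing updates
    LIVE = enum.auto()        # fully up to date as of `from_token`

def must_resend(flag: HaveSentRoomFlag, has_new_events: bool) -> bool:
    """NEVER/PREVIOUSLY always resend; LIVE only when new events exist."""
    if flag in (HaveSentRoomFlag.NEVER, HaveSentRoomFlag.PREVIOUSLY):
        return True
    return has_new_events

assert must_resend(HaveSentRoomFlag.LIVE, has_new_events=False) is False
assert must_resend(HaveSentRoomFlag.PREVIOUSLY, has_new_events=False) is True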
-        @trace
-        @tag_args
-        async def handle_room(room_id: str) -> None:
-            room_sync_result = await self.get_room_sync_data(
-                sync_config=sync_config,
-                room_id=room_id,
-                room_sync_config=relevant_rooms_to_send_map[room_id],
-                room_membership_for_user_at_to_token=room_membership_for_user_map[
-                    room_id
-                ],
-                from_token=from_token,
-                to_token=to_token,
-            )
-
-            # Filter out empty room results during incremental sync
-            if room_sync_result or not from_token:
-                rooms[room_id] = room_sync_result
-
-        if relevant_rooms_to_send_map:
-            with start_active_span("sliding_sync.generate_room_entries"):
-                await concurrently_execute(handle_room, relevant_rooms_to_send_map, 10)
-
-        extensions = await self.get_extensions_response(
-            sync_config=sync_config,
-            actual_lists=lists,
-            # We're purposely using `relevant_room_map` instead of
-            # `relevant_rooms_to_send_map` here. This needs to be all room_ids we
-            # could send regardless of whether they have an event update or not. The
-            # extensions care about more than just normal events in the rooms (like
-            # account data, read receipts, typing indicators, to-device messages,
-            # etc).
-            actual_room_ids=set(relevant_room_map.keys()),
-            actual_room_response_map=rooms,
-            from_token=from_token,
-            to_token=to_token,
-        )
-
-        if has_lists or has_room_subscriptions:
-            # We now calculate if any rooms outside the range have had updates,
-            # which we are not sending down.
-            #
-            # We *must* record rooms that have had updates, but it is also fine
-            # to record rooms as having updates even if there might not actually
-            # be anything new for the user (e.g. due to event filters, events
-            # having happened after the user left, etc).
-            unsent_room_ids = []
-            if from_token:
-                # The set of rooms that the client (may) care about, but aren't
-                # in any list range (or subscribed to).
-                missing_rooms = all_rooms - relevant_room_map.keys()
-
-                # We now just go and try fetching any events in the above rooms
-                # to see if anything has happened since the `from_token`.
-                #
-                # TODO: Replace this with something faster. When we land the
-                # sliding sync tables that record the most recent event
-                # positions we can use that.
-                missing_event_map_by_room = (
-                    await self.store.get_room_events_stream_for_rooms(
-                        room_ids=missing_rooms,
-                        from_key=to_token.room_key,
-                        to_key=from_token.stream_token.room_key,
-                        limit=1,
-                    )
-                )
-                unsent_room_ids = list(missing_event_map_by_room)
-
-            connection_position = await self.connection_store.record_rooms(
-                sync_config=sync_config,
-                from_token=from_token,
-                sent_room_ids=relevant_rooms_to_send_map.keys(),
-                unsent_room_ids=unsent_room_ids,
-            )
-        elif from_token:
-            connection_position = from_token.connection_position
-        else:
-            # Initial sync without a `from_token` starts at `0`
-            connection_position = 0
-
-        sliding_sync_result = SlidingSyncResult(
-            next_pos=SlidingSyncStreamToken(to_token, connection_position),
-            lists=lists,
-            rooms=rooms,
-            extensions=extensions,
-        )
-
-        # Make it easy to find traces for syncs that aren't empty
-        set_tag(SynapseTags.RESULT_PREFIX + "result", bool(sliding_sync_result))
-        set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id)
-
-        return sliding_sync_result
-
-    @trace
-    async def get_room_membership_for_user_at_to_token(
-        self,
-        user: UserID,
-        to_token: StreamToken,
-        from_token: Optional[StreamToken],
-    ) -> Dict[str, _RoomMembershipForUser]:
-        """
-        Fetch room IDs that the user has had membership in (the full room list
-        including long-lost left rooms that will be filtered, sorted, and sliced).
-
-        We're looking for rooms where the user has had any sort of membership in the
-        token range (> `from_token` and <= `to_token`)
-
-        In order for bans/kicks to not show up, you need to `/forget` those rooms.
-        This doesn't modify the event itself though and only adds the `forgotten`
-        flag to the `room_memberships` table in Synapse. There isn't a way to tell
-        when a room was forgotten at the moment so we can't factor it into the token
-        range.
-
-        Args:
-            user: User to fetch rooms for
-            to_token: The token to fetch rooms up to.
-            from_token: The point in the stream to sync from.
-
-        Returns:
-            A dictionary of room IDs that the user has had membership in along with
-            membership information in that room at the time of `to_token`.
-        """
-        user_id = user.to_string()
-
-        # First grab a current snapshot of rooms for the user
-        # (also handles forgotten rooms)
-        room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
-            user_id=user_id,
-            # We want to fetch any kind of membership (joined and left rooms) in
-            # order to get the `event_pos` of the latest room membership event for
-            # the user.
-            membership_list=Membership.LIST,
-            excluded_rooms=self.rooms_to_exclude_globally,
-        )
-
-        # If the user has never joined any rooms before, we can just return an empty
-        # map
-        if not room_for_user_list:
-            return {}
-
-        # Our working list of rooms that can show up in the sync response
-        sync_room_id_set = {
-            # Note: The `room_for_user` we're assigning here will need to be fixed up
-            # (below) because they are potentially from the current snapshot time
-            # instead of from the time of the `to_token`.
-            room_for_user.room_id: _RoomMembershipForUser(
-                room_id=room_for_user.room_id,
-                event_id=room_for_user.event_id,
-                event_pos=room_for_user.event_pos,
-                membership=room_for_user.membership,
-                sender=room_for_user.sender,
-                # We will update these fields below to be accurate
-                newly_joined=False,
-                newly_left=False,
-                is_dm=False,
-            )
-            for room_for_user in room_for_user_list
-        }
-        # Get the `RoomStreamToken` that represents the spot we queried up to when
-        # we got our membership snapshot from
-        # `get_rooms_for_local_user_where_membership_is()`.
-        #
-        # First, we need to get the max stream_ordering of each event persister
-        # instance that we queried events from.
-        instance_to_max_stream_ordering_map: Dict[str, int] = {}
-        for room_for_user in room_for_user_list:
-            instance_name = room_for_user.event_pos.instance_name
-            stream_ordering = room_for_user.event_pos.stream
-
-            current_instance_max_stream_ordering = (
-                instance_to_max_stream_ordering_map.get(instance_name)
-            )
-            if (
-                current_instance_max_stream_ordering is None
-                or stream_ordering > current_instance_max_stream_ordering
-            ):
-                instance_to_max_stream_ordering_map[instance_name] = stream_ordering
-
-        # Then assemble the `RoomStreamToken`
-        min_stream_pos = min(instance_to_max_stream_ordering_map.values())
-        membership_snapshot_token = RoomStreamToken(
-            # Minimum position in the `instance_map`
-            stream=min_stream_pos,
-            instance_map=immutabledict(
-                {
-                    instance_name: stream_pos
-                    for instance_name, stream_pos in instance_to_max_stream_ordering_map.items()
-                    if stream_pos > min_stream_pos
-                }
-            ),
-        )
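# Editor's note -- illustrative sketch only, not part of the diff: a toy run
# of the token assembly above, assuming two hypothetical event persister
# instances. The minimum becomes the token's `stream` position and only
# instances strictly ahead of it land in the `instance_map`.
instance_to_max = {"persister1": 100, "persister2": 150}

min_stream_pos = min(instance_to_max.values())  # 100
instance_map = {
    name: pos for name, pos in instance_to_max.items() if pos > min_stream_pos
}

# Equivalent to RoomStreamToken(stream=100, instance_map={"persister2": 150}):
# everything at or below stream position 100 is covered on every instance,
# and "persister2" is additionally covered up to 150.
assert (min_stream_pos, instance_map) == (100, {"persister2": 150})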
-        # Since we fetched the user's room list at some point in time after the
-        # from/to tokens, we need to revert/rewind some membership changes to match
-        # the point in time of the `to_token`. In particular, we need to make these
-        # fixups:
-        #
-        # - 1a) Remove rooms that the user joined after the `to_token`
-        # - 1b) Add back rooms that the user left after the `to_token`
-        # - 1c) Update room membership events to the point in time of the `to_token`
-        # - 2) Figure out which rooms are `newly_left` rooms (> `from_token` and <= `to_token`)
-        # - 3) Figure out which rooms are `newly_joined` (> `from_token` and <= `to_token`)
-        # - 4) Figure out which rooms are DM's
-
-        # 1) Fetch membership changes that fall in the range from `to_token` up to
-        # `membership_snapshot_token`
-        #
-        # If our `to_token` is already the same or ahead of the latest room
-        # membership for the user, we don't need to do any "1)" fix-ups and can just
-        # straight-up use the room list from the snapshot as a base (nothing has
-        # changed)
-        current_state_delta_membership_changes_after_to_token = []
-        if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
-            current_state_delta_membership_changes_after_to_token = (
-                await self.store.get_current_state_delta_membership_changes_for_user(
-                    user_id,
-                    from_key=to_token.room_key,
-                    to_key=membership_snapshot_token,
-                    excluded_room_ids=self.rooms_to_exclude_globally,
-                )
-            )
-
-        # 1) Assemble a list of the first membership event after the `to_token` so
-        # we can step backward to the previous membership that would apply to the
-        # from/to range.
-        first_membership_change_by_room_id_after_to_token: Dict[
-            str, CurrentStateDeltaMembership
-        ] = {}
-        for membership_change in current_state_delta_membership_changes_after_to_token:
-            # Only set if we haven't already set it
-            first_membership_change_by_room_id_after_to_token.setdefault(
-                membership_change.room_id, membership_change
-            )
-
-        # 1) Fixup
-        #
-        # Since we fetched a snapshot of the user's room list at some point in time
-        # after the from/to tokens, we need to revert/rewind some membership changes
-        # to match the point in time of the `to_token`.
-        for (
-            room_id,
-            first_membership_change_after_to_token,
-        ) in first_membership_change_by_room_id_after_to_token.items():
-            # 1a) Remove rooms that the user joined after the `to_token`
-            if first_membership_change_after_to_token.prev_event_id is None:
-                sync_room_id_set.pop(room_id, None)
-            # 1b) 1c) From the first membership event after the `to_token`, step
-            # backward to the previous membership that would apply to the from/to
-            # range.
-            else:
-                # We don't expect these fields to be `None` if we have a
-                # `prev_event_id` but we're being defensive since it's possible that
-                # the prev event was culled from the database.
-                if (
-                    first_membership_change_after_to_token.prev_event_pos is not None
-                    and first_membership_change_after_to_token.prev_membership
-                    is not None
-                ):
-                    sync_room_id_set[room_id] = _RoomMembershipForUser(
-                        room_id=room_id,
-                        event_id=first_membership_change_after_to_token.prev_event_id,
-                        event_pos=first_membership_change_after_to_token.prev_event_pos,
-                        membership=first_membership_change_after_to_token.prev_membership,
-                        sender=first_membership_change_after_to_token.prev_sender,
-                        # We will update these fields below to be accurate
-                        newly_joined=False,
-                        newly_left=False,
-                        is_dm=False,
-                    )
-                else:
-                    # If we can't find the previous membership event, we shouldn't
-                    # include the room in the sync response since we can't determine
-                    # the exact membership state and shouldn't rely on the current
-                    # snapshot.
-                    sync_room_id_set.pop(room_id, None)
-
-        # 2) Fetch membership changes that fall in the range from `from_token` up to
-        # `to_token`
-        current_state_delta_membership_changes_in_from_to_range = []
-        if from_token:
-            current_state_delta_membership_changes_in_from_to_range = (
-                await self.store.get_current_state_delta_membership_changes_for_user(
-                    user_id,
-                    from_key=from_token.room_key,
-                    to_key=to_token.room_key,
-                    excluded_room_ids=self.rooms_to_exclude_globally,
-                )
-            )
-
-        # 2) Assemble a list of the last membership events in some given ranges.
-        # Someone could have left and joined multiple times during the given range
-        # but we only care about the end result so we grab the last one.
-        last_membership_change_by_room_id_in_from_to_range: Dict[
-            str, CurrentStateDeltaMembership
-        ] = {}
-        # We also want to assemble a list of the first membership events during the
-        # token range so we can step backward to the previous membership that would
-        # apply to before the token range to see if we have `newly_joined` the room.
-        first_membership_change_by_room_id_in_from_to_range: Dict[
-            str, CurrentStateDeltaMembership
-        ] = {}
-        # Keep track if the room has a non-join event in the token range so we can
-        # later tell if it was a `newly_joined` room. If the last membership event in
-        # the token range is a join and there is also some non-join in the range, we
-        # know they `newly_joined`.
-        has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {}
-        for (
-            membership_change
-        ) in current_state_delta_membership_changes_in_from_to_range:
-            room_id = membership_change.room_id
-
-            last_membership_change_by_room_id_in_from_to_range[room_id] = (
-                membership_change
-            )
-            # Only set if we haven't already set it
-            first_membership_change_by_room_id_in_from_to_range.setdefault(
-                room_id, membership_change
-            )
-
-            if membership_change.membership != Membership.JOIN:
-                has_non_join_event_by_room_id_in_from_to_range[room_id] = True
-
-        # 2) Fixup
-        #
-        # 3) We also want to assemble a list of possibly newly joined rooms. Someone
-        # could have left and joined multiple times during the given range but we
-        # only care about whether they are joined at the end of the token range so we
-        # are working with the last membership event in the token range.
-        possibly_newly_joined_room_ids = set()
-        for (
-            last_membership_change_in_from_to_range
-        ) in last_membership_change_by_room_id_in_from_to_range.values():
-            room_id = last_membership_change_in_from_to_range.room_id
-
-            # 3)
-            if last_membership_change_in_from_to_range.membership == Membership.JOIN:
-                possibly_newly_joined_room_ids.add(room_id)
-
-            # 2) Figure out newly_left rooms (> `from_token` and <= `to_token`).
-            if last_membership_change_in_from_to_range.membership == Membership.LEAVE:
-                # 2) Mark this room as `newly_left`
-
-                # If we're seeing a membership change here, we should expect to
-                # already have it in our snapshot but if a state reset happens, it
-                # wouldn't have shown up in our snapshot but appear as a change here.
-                existing_sync_entry = sync_room_id_set.get(room_id)
-                if existing_sync_entry is not None:
-                    # Normal expected case
-                    sync_room_id_set[room_id] = existing_sync_entry.copy_and_replace(
-                        newly_left=True
-                    )
-                else:
-                    # State reset!
-                    logger.warning(
-                        "State reset detected for room_id %s with %s who is no longer in the room",
-                        room_id,
-                        user_id,
-                    )
-                    # Even though a state reset happened which removed the person
-                    # from the room, we still add it to the list so the user knows
-                    # they left the room. Downstream code can check for a state reset
-                    # by looking for `event_id=None and membership is not None`.
-                    sync_room_id_set[room_id] = _RoomMembershipForUser(
-                        room_id=room_id,
-                        event_id=last_membership_change_in_from_to_range.event_id,
-                        event_pos=last_membership_change_in_from_to_range.event_pos,
-                        membership=last_membership_change_in_from_to_range.membership,
-                        sender=last_membership_change_in_from_to_range.sender,
-                        newly_joined=False,
-                        newly_left=True,
-                        is_dm=False,
-                    )
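# Editor's note -- illustrative sketch only, not part of the diff: the
# `newly_joined` decision applied in the loop below, as a pure function over
# what we know about the token range (membership strings stand in for the
# `Membership` constants).
from typing import Optional

def is_newly_joined(
    has_non_join_in_range: bool,
    prev_event_id: Optional[str],
    prev_membership: Optional[str],
) -> bool:
    """The user ends the range joined; did they *become* joined in the range?"""
    if has_non_join_in_range:
        # Left (or was invited/kicked/etc.) and re-joined within the range
        return True
    if prev_event_id is None:
        # First-ever membership event for this room is inside the range
        return True
    # Otherwise, newly joined only if they weren't already joined beforehand
    return prev_membership != "join"

assert is_newly_joined(False, "$prior_leave", "leave") is True
assert is_newly_joined(False, "$prior_join", "join") is False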
-        # 3) Figure out `newly_joined`
-        for room_id in possibly_newly_joined_room_ids:
-            has_non_join_in_from_to_range = (
-                has_non_join_event_by_room_id_in_from_to_range.get(room_id, False)
-            )
-            # If the last membership event in the token range is a join and there is
-            # also some non-join in the range, we know they `newly_joined`.
-            if has_non_join_in_from_to_range:
-                # We found a `newly_joined` room (we left and joined within the token
-                # range)
-                sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace(
-                    newly_joined=True
-                )
-            else:
-                prev_event_id = first_membership_change_by_room_id_in_from_to_range[
-                    room_id
-                ].prev_event_id
-                prev_membership = first_membership_change_by_room_id_in_from_to_range[
-                    room_id
-                ].prev_membership
-
-                if prev_event_id is None:
-                    # We found a `newly_joined` room (we are joining the room for the
-                    # first time within the token range)
-                    sync_room_id_set[room_id] = sync_room_id_set[
-                        room_id
-                    ].copy_and_replace(newly_joined=True)
-                # Last resort, we need to step back to the previous membership event
-                # just before the token range to see if we're joined then or not.
-                elif prev_membership != Membership.JOIN:
-                    # We found a `newly_joined` room (we left before the token range
-                    # and joined within the token range)
-                    sync_room_id_set[room_id] = sync_room_id_set[
-                        room_id
-                    ].copy_and_replace(newly_joined=True)
-
-        # 4) Figure out which rooms the user considers to be direct-message (DM)
-        # rooms
-        #
-        # We're using global account data (`m.direct`) instead of checking for
-        # `is_direct` on membership events because that property only appears for
-        # the invitee membership event (doesn't show up for the inviter).
-        #
-        # We're unable to take `to_token` into account for global account data since
-        # we only keep track of the latest account data for the user.
-        dm_map = await self.store.get_global_account_data_by_type_for_user(
-            user_id, AccountDataTypes.DIRECT
-        )
-
-        # Flatten out the map. Account data is set by the client so it needs to be
-        # scrutinized.
-        dm_room_id_set = set()
-        if isinstance(dm_map, dict):
-            for room_ids in dm_map.values():
-                # Account data should be a list of room IDs. Ignore anything else
-                if isinstance(room_ids, list):
-                    for room_id in room_ids:
-                        if isinstance(room_id, str):
-                            dm_room_id_set.add(room_id)
-
-        # 4) Fixup
-        for room_id in sync_room_id_set:
-            sync_room_id_set[room_id] = sync_room_id_set[room_id].copy_and_replace(
-                is_dm=room_id in dm_room_id_set
-            )
-
-        return sync_room_id_set
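# Editor's note -- illustrative sketch only, not part of the diff:
# defensively flattening an `m.direct` account data blob (client-supplied,
# so untrusted) into a set of DM room IDs, as step 4) above does.
from typing import Any, Set

def flatten_dm_map(dm_map: Any) -> Set[str]:
    """Collect room IDs from {user_id: [room_id, ...]}, ignoring junk."""
    dm_room_ids: Set[str] = set()
    if isinstance(dm_map, dict):
        for room_ids in dm_map.values():
            if isinstance(room_ids, list):
                dm_room_ids.update(r for r in room_ids if isinstance(r, str))
    return dm_room_ids

# Malformed values are silently dropped rather than erroring out.
assert flatten_dm_map(
    {"@alice:test": ["!dm:test", 42], "@bob:test": "not-a-list"}
) == {"!dm:test"}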
- """ - user_id = user.to_string() - - # Filter rooms to only what we're interested to sync with - filtered_sync_room_map = { - room_id: room_membership_for_user - for room_id, room_membership_for_user in room_membership_for_user_map.items() - if filter_membership_for_sync( - user_id=user_id, - room_membership_for_user=room_membership_for_user, - ) - } - - return filtered_sync_room_map - - async def check_room_subscription_allowed_for_user( - self, - room_id: str, - room_membership_for_user_map: Dict[str, _RoomMembershipForUser], - to_token: StreamToken, - ) -> Optional[_RoomMembershipForUser]: - """ - Check whether the user is allowed to see the room based on whether they have - ever had membership in the room or if the room is `world_readable`. - - Similar to `check_user_in_room_or_world_readable(...)` - - Args: - room_id: Room to check - room_membership_for_user_map: Room membership for the user at the time of - the `to_token` (<= `to_token`). - to_token: The token to fetch rooms up to. - - Returns: - The room membership for the user if they are allowed to subscribe to the - room else `None`. - """ - - # We can first check if they are already allowed to see the room based - # on our previous work to assemble the `room_membership_for_user_map`. - # - # If they have had any membership in the room over time (up to the `to_token`), - # let them subscribe and see what they can. - existing_membership_for_user = room_membership_for_user_map.get(room_id) - if existing_membership_for_user is not None: - return existing_membership_for_user - - # TODO: Handle `world_readable` rooms - return None - - # If the room is `world_readable`, it doesn't matter whether they can join, - # everyone can see the room. - # not_in_room_membership_for_user = _RoomMembershipForUser( - # room_id=room_id, - # event_id=None, - # event_pos=None, - # membership=None, - # sender=None, - # newly_joined=False, - # newly_left=False, - # is_dm=False, - # ) - # room_state = await self.get_current_state_at( - # room_id=room_id, - # room_membership_for_user_at_to_token=not_in_room_membership_for_user, - # state_filter=StateFilter.from_types( - # [(EventTypes.RoomHistoryVisibility, "")] - # ), - # to_token=to_token, - # ) - - # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, "")) - # if ( - # visibility_event is not None - # and visibility_event.content.get("history_visibility") - # == HistoryVisibility.WORLD_READABLE - # ): - # return not_in_room_membership_for_user - - # return None - - @trace - async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( - self, - room_ids: StrCollection, - sync_room_map: Dict[str, _RoomMembershipForUser], - ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: - """ - Fetch stripped state for a list of room IDs. Stripped state is only - applicable to invite/knock rooms. Other rooms will have `None` as their - stripped state. - - For invite rooms, we pull from `unsigned.invite_room_state`. - For knock rooms, we pull from `unsigned.knock_room_state`. - - Args: - room_ids: Room IDs to fetch stripped state for - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - - Returns: - Mapping from room_id to mapping of (type, state_key) to stripped state - event. 
- """ - room_id_to_stripped_state_map: Dict[ - str, Optional[StateMap[StrippedStateEvent]] - ] = {} - - # Fetch what we haven't before - room_ids_to_fetch = [ - room_id - for room_id in room_ids - if room_id not in room_id_to_stripped_state_map - ] - - # Gather a list of event IDs we can grab stripped state from - invite_or_knock_event_ids: List[str] = [] - for room_id in room_ids_to_fetch: - if sync_room_map[room_id].membership in ( - Membership.INVITE, - Membership.KNOCK, - ): - event_id = sync_room_map[room_id].event_id - # If this is an invite/knock then there should be an event_id - assert event_id is not None - invite_or_knock_event_ids.append(event_id) - else: - room_id_to_stripped_state_map[room_id] = None - - invite_or_knock_events = await self.store.get_events(invite_or_knock_event_ids) - for invite_or_knock_event in invite_or_knock_events.values(): - room_id = invite_or_knock_event.room_id - membership = invite_or_knock_event.membership - - raw_stripped_state_events = None - if membership == Membership.INVITE: - invite_room_state = invite_or_knock_event.unsigned.get( - "invite_room_state" - ) - raw_stripped_state_events = invite_room_state - elif membership == Membership.KNOCK: - knock_room_state = invite_or_knock_event.unsigned.get( - "knock_room_state" - ) - raw_stripped_state_events = knock_room_state - else: - raise AssertionError( - f"Unexpected membership {membership} (this is a problem with Synapse itself)" - ) - - stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None - # Scrutinize unsigned things. `raw_stripped_state_events` should be a list - # of stripped events - if raw_stripped_state_events is not None: - stripped_state_map = {} - if isinstance(raw_stripped_state_events, list): - for raw_stripped_event in raw_stripped_state_events: - stripped_state_event = parse_stripped_state_event( - raw_stripped_event - ) - if stripped_state_event is not None: - stripped_state_map[ - ( - stripped_state_event.type, - stripped_state_event.state_key, - ) - ] = stripped_state_event - - room_id_to_stripped_state_map[room_id] = stripped_state_map - - return room_id_to_stripped_state_map - - @trace - async def _bulk_get_partial_current_state_content_for_rooms( - self, - content_type: Literal[ - # `content.type` from `EventTypes.Create`` - "room_type", - # `content.algorithm` from `EventTypes.RoomEncryption` - "room_encryption", - ], - room_ids: Set[str], - sync_room_map: Dict[str, _RoomMembershipForUser], - to_token: StreamToken, - room_id_to_stripped_state_map: Dict[ - str, Optional[StateMap[StrippedStateEvent]] - ], - ) -> Mapping[str, Union[Optional[str], StateSentinel]]: - """ - Get the given state event content for a list of rooms. First we check the - current state of the room, then fallback to stripped state if available, then - historical state. - - Args: - content_type: Which content to grab - room_ids: Room IDs to fetch the given content field for. - sync_room_map: Dictionary of room IDs to sort along with membership - information in the room at the time of `to_token`. - to_token: We filter based on the state of the room at this token - room_id_to_stripped_state_map: This does not need to be filled in before - calling this function. Mapping from room_id to mapping of (type, state_key) - to stripped state event. Modified in place when we fetch new rooms so we can - save work next time this function is called. - - Returns: - A mapping from room ID to the state event content if the room has - the given state event (event_type, ""), otherwise `None`. 
-    @trace
-    async def _bulk_get_partial_current_state_content_for_rooms(
-        self,
-        content_type: Literal[
-            # `content.type` from `EventTypes.Create`
-            "room_type",
-            # `content.algorithm` from `EventTypes.RoomEncryption`
-            "room_encryption",
-        ],
-        room_ids: Set[str],
-        sync_room_map: Dict[str, _RoomMembershipForUser],
-        to_token: StreamToken,
-        room_id_to_stripped_state_map: Dict[
-            str, Optional[StateMap[StrippedStateEvent]]
-        ],
-    ) -> Mapping[str, Union[Optional[str], StateSentinel]]:
-        """
-        Get the given state event content for a list of rooms. First we check the
-        current state of the room, then fall back to stripped state if available,
-        then historical state.
-
-        Args:
-            content_type: Which content to grab
-            room_ids: Room IDs to fetch the given content field for.
-            sync_room_map: Dictionary of room IDs to sort along with membership
-                information in the room at the time of `to_token`.
-            to_token: We filter based on the state of the room at this token
-            room_id_to_stripped_state_map: This does not need to be filled in before
-                calling this function. Mapping from room_id to mapping of (type,
-                state_key) to stripped state event. Modified in place when we fetch
-                new rooms so we can save work next time this function is called.
-
-        Returns:
-            A mapping from room ID to the state event content if the room has
-            the given state event (event_type, ""), otherwise `None`. Rooms unknown
-            to this server will return `ROOM_UNKNOWN_SENTINEL`.
-        """
-        room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {}
-
-        # As a bulk shortcut, use the current state if the server is participating in
-        # the room (meaning we have current state). Ideally, for leave/ban rooms, we
-        # would want the state at the time of the membership instead of current state
-        # to not leak anything but we consider the create/encryption stripped state
-        # events to not be a secret given they are often set at the start of the room
-        # and they are normally handed out on invite/knock.
-        #
-        # Be mindful to only use this for non-sensitive details. For example, even
-        # though the room name/avatar/topic are also stripped state, their current
-        # state values seem a lot more sensitive to leak.
-        #
-        # Since the underlying store functions are cached, we need to make a mutable
-        # copy via `dict(...)`.
-        event_type = ""
-        event_content_field = ""
-        if content_type == "room_type":
-            event_type = EventTypes.Create
-            event_content_field = EventContentFields.ROOM_TYPE
-            room_id_to_content = dict(await self.store.bulk_get_room_type(room_ids))
-        elif content_type == "room_encryption":
-            event_type = EventTypes.RoomEncryption
-            event_content_field = EventContentFields.ENCRYPTION_ALGORITHM
-            room_id_to_content = dict(
-                await self.store.bulk_get_room_encryption(room_ids)
-            )
-        else:
-            assert_never(content_type)
-
-        room_ids_with_results = [
-            room_id
-            for room_id, content_field in room_id_to_content.items()
-            if content_field is not ROOM_UNKNOWN_SENTINEL
-        ]
-
-        # We might not have current room state for remote invite/knocks if we are
-        # the first person on our server to see the room. The best we can do is look
-        # in the optional stripped state from the invite/knock event.
-        room_ids_without_results = room_ids.difference(
-            chain(
-                room_ids_with_results,
-                [
-                    room_id
-                    for room_id, stripped_state_map in room_id_to_stripped_state_map.items()
-                    if stripped_state_map is not None
-                ],
-            )
-        )
-        room_id_to_stripped_state_map.update(
-            await self._bulk_get_stripped_state_for_rooms_from_sync_room_map(
-                room_ids_without_results, sync_room_map
-            )
-        )
-
-        # Update our `room_id_to_content` map based on the stripped state
-        # (applies to invite/knock rooms)
-        rooms_ids_without_stripped_state: Set[str] = set()
-        for room_id in room_ids_without_results:
-            stripped_state_map = room_id_to_stripped_state_map.get(
-                room_id, Sentinel.UNSET_SENTINEL
-            )
-            assert stripped_state_map is not Sentinel.UNSET_SENTINEL, (
-                f"Stripped state left unset for room {room_id}. "
-                + "Make sure you're calling `_bulk_get_stripped_state_for_rooms_from_sync_room_map(...)` "
-                + "with that room_id. (this is a problem with Synapse itself)"
-            )
-
-            # If there is some stripped state, we assume the remote server passed
-            # *all* of the potential stripped state events for the room.
-            if stripped_state_map is not None:
-                create_stripped_event = stripped_state_map.get((EventTypes.Create, ""))
-                stripped_event = stripped_state_map.get((event_type, ""))
-                # Sanity check that we at least have the create event
-                if create_stripped_event is not None:
-                    if stripped_event is not None:
-                        room_id_to_content[room_id] = stripped_event.content.get(
-                            event_content_field
-                        )
-                    else:
-                        # Didn't see the state event we're looking for in the
-                        # stripped state so we can assume the relevant content field
-                        # is `None`.
-                        room_id_to_content[room_id] = None
-                else:
-                    rooms_ids_without_stripped_state.add(room_id)
-
-        # Last resort, we might not have current room state for rooms that the
-        # server has left (no one local is in the room) but we can look at the
-        # historical state.
-        #
-        # Update our `room_id_to_content` map based on the state at the time of
-        # the membership event.
-        for room_id in rooms_ids_without_stripped_state:
-            # TODO: It would be nice to look this up in a bulk way (N+1 queries)
-            #
-            # TODO: `get_state_at(...)` doesn't take into account the "current state".
-            room_state = await self.storage_controllers.state.get_state_at(
-                room_id=room_id,
-                stream_position=to_token.copy_and_replace(
-                    StreamKeyType.ROOM,
-                    sync_room_map[room_id].event_pos.to_room_stream_token(),
-                ),
-                state_filter=StateFilter.from_types(
-                    [
-                        (EventTypes.Create, ""),
-                        (event_type, ""),
-                    ]
-                ),
-                # Partially-stated rooms should have all state events except for
-                # remote membership events so we don't need to wait at all because
-                # we only want the create event and some non-member event.
-                await_full_state=False,
-            )
-            # We can use the create event as a canary to tell whether the server has
-            # seen the room before
-            create_event = room_state.get((EventTypes.Create, ""))
-            state_event = room_state.get((event_type, ""))
-
-            if create_event is None:
-                # Skip for unknown rooms
-                continue
-
-            if state_event is not None:
-                room_id_to_content[room_id] = state_event.content.get(
-                    event_content_field
-                )
-            else:
-                # Didn't see the state event we're looking for in the historical
-                # state so we can assume the relevant content field is `None`.
-                room_id_to_content[room_id] = None
-
-        return room_id_to_content
-
-    @trace
-    async def filter_rooms(
-        self,
-        user: UserID,
-        sync_room_map: Dict[str, _RoomMembershipForUser],
-        filters: SlidingSyncConfig.SlidingSyncList.Filters,
-        to_token: StreamToken,
-    ) -> Dict[str, _RoomMembershipForUser]:
-        """
-        Filter rooms based on the sync request.
-
-        Args:
-            user: User to filter rooms for
-            sync_room_map: Dictionary of room IDs to sort along with membership
-                information in the room at the time of `to_token`.
-            filters: Filters to apply
-            to_token: We filter based on the state of the room at this token
-
-        Returns:
-            A filtered dictionary of room IDs along with membership information in
-            the room at the time of `to_token`.
- """ - room_id_to_stripped_state_map: Dict[ - str, Optional[StateMap[StrippedStateEvent]] - ] = {} - - filtered_room_id_set = set(sync_room_map.keys()) - - # Filter for Direct-Message (DM) rooms - if filters.is_dm is not None: - with start_active_span("filters.is_dm"): - if filters.is_dm: - # Only DM rooms please - filtered_room_id_set = { - room_id - for room_id in filtered_room_id_set - if sync_room_map[room_id].is_dm - } - else: - # Only non-DM rooms please - filtered_room_id_set = { - room_id - for room_id in filtered_room_id_set - if not sync_room_map[room_id].is_dm - } - - if filters.spaces is not None: - with start_active_span("filters.spaces"): - raise NotImplementedError() - - # Filter for encrypted rooms - if filters.is_encrypted is not None: - with start_active_span("filters.is_encrypted"): - room_id_to_encryption = ( - await self._bulk_get_partial_current_state_content_for_rooms( - content_type="room_encryption", - room_ids=filtered_room_id_set, - to_token=to_token, - sync_room_map=sync_room_map, - room_id_to_stripped_state_map=room_id_to_stripped_state_map, - ) - ) - - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - encryption = room_id_to_encryption.get( - room_id, ROOM_UNKNOWN_SENTINEL - ) - - # Just remove rooms if we can't determine their encryption status - if encryption is ROOM_UNKNOWN_SENTINEL: - filtered_room_id_set.remove(room_id) - continue - - # If we're looking for encrypted rooms, filter out rooms that are not - # encrypted and vice versa - is_encrypted = encryption is not None - if (filters.is_encrypted and not is_encrypted) or ( - not filters.is_encrypted and is_encrypted - ): - filtered_room_id_set.remove(room_id) - - # Filter for rooms that the user has been invited to - if filters.is_invite is not None: - with start_active_span("filters.is_invite"): - # Make a copy so we don't run into an error: `Set changed size during - # iteration`, when we filter out and remove items - for room_id in filtered_room_id_set.copy(): - room_for_user = sync_room_map[room_id] - # If we're looking for invite rooms, filter out rooms that the user is - # not invited to and vice versa - if ( - filters.is_invite - and room_for_user.membership != Membership.INVITE - ) or ( - not filters.is_invite - and room_for_user.membership == Membership.INVITE - ): - filtered_room_id_set.remove(room_id) - - # Filter by room type (space vs room, etc). A room must match one of the types - # provided in the list. `None` is a valid type for rooms which do not have a - # room type. 
-        # Filter by room type (space vs room, etc). A room must match one of the
-        # types provided in the list. `None` is a valid type for rooms which do not
-        # have a room type.
-        if filters.room_types is not None or filters.not_room_types is not None:
-            with start_active_span("filters.room_types"):
-                room_id_to_type = (
-                    await self._bulk_get_partial_current_state_content_for_rooms(
-                        content_type="room_type",
-                        room_ids=filtered_room_id_set,
-                        to_token=to_token,
-                        sync_room_map=sync_room_map,
-                        room_id_to_stripped_state_map=room_id_to_stripped_state_map,
-                    )
-                )
-
-                # Make a copy so we don't run into an error: `Set changed size during
-                # iteration`, when we filter out and remove items
-                for room_id in filtered_room_id_set.copy():
-                    room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL)
-
-                    # Just remove rooms if we can't determine their type
-                    if room_type is ROOM_UNKNOWN_SENTINEL:
-                        filtered_room_id_set.remove(room_id)
-                        continue
-
-                    if (
-                        filters.room_types is not None
-                        and room_type not in filters.room_types
-                    ):
-                        filtered_room_id_set.remove(room_id)
-
-                    if (
-                        filters.not_room_types is not None
-                        and room_type in filters.not_room_types
-                    ):
-                        filtered_room_id_set.remove(room_id)
-
-        if filters.room_name_like is not None:
-            with start_active_span("filters.room_name_like"):
-                # TODO: The room name is a bit more sensitive to leak than the
-                # create/encryption event. Maybe we should consider a better way to
-                # fetch historical state before implementing this.
-                #
-                # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms(
-                #     content_type="room_name",
-                #     room_ids=filtered_room_id_set,
-                #     to_token=to_token,
-                #     sync_room_map=sync_room_map,
-                #     room_id_to_stripped_state_map=room_id_to_stripped_state_map,
-                # )
-                raise NotImplementedError()
-
-        if filters.tags is not None or filters.not_tags is not None:
-            with start_active_span("filters.tags"):
-                raise NotImplementedError()
-
-        # Assemble a new sync room map but only with the `filtered_room_id_set`
-        return {room_id: sync_room_map[room_id] for room_id in filtered_room_id_set}
-
-    @trace
-    async def sort_rooms(
-        self,
-        sync_room_map: Dict[str, _RoomMembershipForUser],
-        to_token: StreamToken,
-    ) -> List[_RoomMembershipForUser]:
-        """
-        Sort by `stream_ordering` of the last event that the user should see in the
-        room. `stream_ordering` is unique so we get a stable sort.
-
-        Args:
-            sync_room_map: Dictionary of room IDs to sort along with membership
-                information in the room at the time of `to_token`.
-            to_token: We sort based on the events in the room at this token
-                (<= `to_token`)
-
-        Returns:
-            A sorted list of room IDs by `stream_ordering` along with membership
-            information.
-        """
-
-        # Assemble a map of room ID to the `stream_ordering` of the last activity
-        # that the user should see in the room (<= `to_token`)
-        last_activity_in_room_map: Dict[str, int] = {}
-
-        for room_id, room_for_user in sync_room_map.items():
-            if room_for_user.membership != Membership.JOIN:
-                # If the user has left/been invited/knocked/been banned from a
-                # room, they shouldn't see anything past that point.
-                #
-                # FIXME: It's possible that people should see beyond this point
-                # in invited/knocked cases if for example the room has
-                # `invite`/`world_readable` history visibility, see
-                # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
-                last_activity_in_room_map[room_id] = room_for_user.event_pos.stream
-
-        # For fully-joined rooms, we find the latest activity at/before the
-        # `to_token`.
-        joined_room_positions = (
-            await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering(
-                [
-                    room_id
-                    for room_id, room_for_user in sync_room_map.items()
-                    if room_for_user.membership == Membership.JOIN
-                ],
-                to_token.room_key,
-            )
-        )
-
-        last_activity_in_room_map.update(joined_room_positions)
-
-        return sorted(
-            sync_room_map.values(),
-            # Sort by the last activity (stream_ordering) in the room
-            key=lambda room_info: last_activity_in_room_map[room_info.room_id],
-            # We want descending order
-            reverse=True,
-        )
-
-    @trace
-    async def get_current_state_ids_at(
-        self,
-        room_id: str,
-        room_membership_for_user_at_to_token: _RoomMembershipForUser,
-        state_filter: StateFilter,
-        to_token: StreamToken,
-    ) -> StateMap[str]:
-        """
-        Get current state IDs for the user in the room according to their
-        membership. This will be the current state at the time of their LEAVE/BAN,
-        otherwise will be the current state <= to_token.
-
-        Args:
-            room_id: The room ID to fetch data for
-            room_membership_for_user_at_to_token: Membership information for the user
-                in the room at the time of `to_token`.
-            state_filter: The state to fetch
-            to_token: The point in the stream to sync up to.
-        """
-        state_ids: StateMap[str]
-        # People shouldn't see past their leave/ban event
-        if room_membership_for_user_at_to_token.membership in (
-            Membership.LEAVE,
-            Membership.BAN,
-        ):
-            # TODO: `get_state_ids_at(...)` doesn't take into account the "current
-            # state". Maybe we need to use
-            # `get_forward_extremities_for_room_at_stream_ordering(...)` to "Fetch
-            # the current state at the time."
-            state_ids = await self.storage_controllers.state.get_state_ids_at(
-                room_id,
-                stream_position=to_token.copy_and_replace(
-                    StreamKeyType.ROOM,
-                    room_membership_for_user_at_to_token.event_pos.to_room_stream_token(),
-                ),
-                state_filter=state_filter,
-                # Partially-stated rooms should have all state events except for
-                # remote membership events. Since we've already excluded
-                # partially-stated rooms unless `required_state` only has
-                # `["m.room.member", "$LAZY"]` for membership, we should be able to
-                # retrieve everything requested. When we're lazy-loading, if there
-                # are some remote senders in the timeline, we should also have their
-                # membership event because we had to auth that timeline event. Plus
-                # we don't want to block the whole sync waiting for this one room.
-                await_full_state=False,
-            )
-        # Otherwise, we can get the latest current state in the room
-        else:
-            state_ids = await self.storage_controllers.state.get_current_state_ids(
-                room_id,
-                state_filter,
-                # Partially-stated rooms should have all state events except for
-                # remote membership events. Since we've already excluded
-                # partially-stated rooms unless `required_state` only has
-                # `["m.room.member", "$LAZY"]` for membership, we should be able to
-                # retrieve everything requested. When we're lazy-loading, if there
-                # are some remote senders in the timeline, we should also have their
-                # membership event because we had to auth that timeline event. Plus
-                # we don't want to block the whole sync waiting for this one room.
-                await_full_state=False,
-            )
-        # TODO: Query `current_state_delta_stream` and reverse/rewind back to the
-        # `to_token`
-
-        return state_ids
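# Editor's note -- illustrative sketch only, not part of the diff: choosing
# the stream position to read state at, as `get_current_state_ids_at` does
# above. Users who have left or been banned should not see state past their
# own leave/ban event; membership strings stand in for `Membership` constants.
from typing import Optional

def state_read_bound(membership: str, membership_stream_pos: int) -> Optional[int]:
    """Return the stream position to clamp state reads to, or None for "latest"."""
    if membership in ("leave", "ban"):
        # Clamp to the user's own leave/ban event
        return membership_stream_pos
    # Joined (and invited/knocked) users read the latest current state
    return None

assert state_read_bound("ban", 1234) == 1234
assert state_read_bound("join", 1234) is None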
-    @trace
-    async def get_current_state_at(
-        self,
-        room_id: str,
-        room_membership_for_user_at_to_token: _RoomMembershipForUser,
-        state_filter: StateFilter,
-        to_token: StreamToken,
-    ) -> StateMap[EventBase]:
-        """
-        Get current state for the user in the room according to their membership.
-        This will be the current state at the time of their LEAVE/BAN, otherwise
-        will be the current state <= to_token.
-
-        Args:
-            room_id: The room ID to fetch data for
-            room_membership_for_user_at_to_token: Membership information for the user
-                in the room at the time of `to_token`.
-            state_filter: The state to fetch
-            to_token: The point in the stream to sync up to.
-        """
-        state_ids = await self.get_current_state_ids_at(
-            room_id=room_id,
-            room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
-            state_filter=state_filter,
-            to_token=to_token,
-        )
-
-        event_map = await self.store.get_events(list(state_ids.values()))
-
-        state_map = {}
-        for key, event_id in state_ids.items():
-            event = event_map.get(event_id)
-            if event:
-                state_map[key] = event
-
-        return state_map
-
-    async def get_room_sync_data(
-        self,
-        sync_config: SlidingSyncConfig,
-        room_id: str,
-        room_sync_config: RoomSyncConfig,
-        room_membership_for_user_at_to_token: _RoomMembershipForUser,
-        from_token: Optional[SlidingSyncStreamToken],
-        to_token: StreamToken,
-    ) -> SlidingSyncResult.RoomResult:
-        """
-        Fetch room data for the sync response.
-
-        We fetch data according to the token range (> `from_token` and <= `to_token`).
-
-        Args:
-            sync_config: Sync configuration (including the user to fetch data for)
-            room_id: The room ID to fetch data for
-            room_sync_config: Config for what data we should fetch for a room in the
-                sync response.
-            room_membership_for_user_at_to_token: Membership information for the user
-                in the room at the time of `to_token`.
-            from_token: The point in the stream to sync from.
-            to_token: The point in the stream to sync up to.
-        """
-        user = sync_config.user
-
-        set_tag(
-            SynapseTags.FUNC_ARG_PREFIX + "membership",
-            room_membership_for_user_at_to_token.membership,
-        )
-        set_tag(
-            SynapseTags.FUNC_ARG_PREFIX + "timeline_limit",
-            room_sync_config.timeline_limit,
-        )
-
-        # Determine whether we should limit the timeline to the token range.
-        #
-        # We should return historical messages (before token range) in the
-        # following cases because we want clients to be able to show a basic
-        # screen of information:
-        #
-        # - Initial sync (because no `from_token` to limit us anyway)
-        # - When users `newly_joined`
-        # - For an incremental sync where we haven't sent it down this
-        #   connection before
-        #
-        # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917
-        from_bound = None
-        initial = True
-        if from_token and not room_membership_for_user_at_to_token.newly_joined:
-            room_status = await self.connection_store.have_sent_room(
-                sync_config=sync_config,
-                connection_token=from_token.connection_position,
-                room_id=room_id,
-            )
-            if room_status.status == HaveSentRoomFlag.LIVE:
-                from_bound = from_token.stream_token.room_key
-                initial = False
-            elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
-                assert room_status.last_token is not None
-                from_bound = room_status.last_token
-                initial = False
-            elif room_status.status == HaveSentRoomFlag.NEVER:
-                from_bound = None
-                initial = True
-            else:
-                assert_never(room_status.status)
-
-            log_kv({"sliding_sync.room_status": room_status})
-
-        log_kv({"sliding_sync.from_bound": from_bound, "sliding_sync.initial": initial})
-
-        # Assemble the list of timeline events
-        #
-        # FIXME: It would be nice to make the `rooms` response more uniform
-        # regardless of membership. Currently, we have to make all of these optional
-        # because `invite`/`knock` rooms only have `stripped_state`. See
-        # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932
-        timeline_events: List[EventBase] = []
-        bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None
-        limited: Optional[bool] = None
-        prev_batch_token: Optional[StreamToken] = None
-        num_live: Optional[int] = None
-        if (
-            room_sync_config.timeline_limit > 0
-            # No timeline for invite/knock rooms (just `stripped_state`)
-            and room_membership_for_user_at_to_token.membership
-            not in (Membership.INVITE, Membership.KNOCK)
-        ):
-            limited = False
-            # We want to start off using the `to_token` (vs `from_token`) because we
-            # look backwards from the `to_token` up to the `timeline_limit` and we
-            # might not reach the `from_token` before we hit the limit. We will
-            # update the room stream position once we've fetched the events to point
-            # to the earliest event fetched.
-            prev_batch_token = to_token
-
-            # We're going to paginate backwards from the `to_token`
-            to_bound = to_token.room_key
-            # People shouldn't see past their leave/ban event
-            if room_membership_for_user_at_to_token.membership in (
-                Membership.LEAVE,
-                Membership.BAN,
-            ):
-                to_bound = (
-                    room_membership_for_user_at_to_token.event_pos.to_room_stream_token()
-                )
-
-            # For initial `/sync` (and other historical scenarios mentioned above),
-            # we want to view a historical section of the timeline; to fetch events
-            # by `topological_ordering` (best representation of the room DAG as
-            # others were seeing it at the time). This also aligns with the order
-            # that `/messages` returns events in.
-            #
-            # For incremental `/sync`, we want to get all updates for rooms since
-            # the last `/sync` (regardless if those updates arrived late or happened
-            # a while ago in the past); to fetch events by `stream_ordering` (in the
-            # order they were received by the server).
-            #
-            # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917
-            #
-            # FIXME: Using workaround for mypy,
-            # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and
-            # https://github.com/python/mypy/issues/17479
-            paginate_room_events_by_topological_ordering: PaginateFunction = (
-                self.store.paginate_room_events_by_topological_ordering
-            )
-            paginate_room_events_by_stream_ordering: PaginateFunction = (
-                self.store.paginate_room_events_by_stream_ordering
-            )
-            pagination_method: PaginateFunction = (
-                # Use `topological_ordering` for historical events
-                paginate_room_events_by_topological_ordering
-                if from_bound is None
-                # Use `stream_ordering` for updates
-                else paginate_room_events_by_stream_ordering
-            )
-            timeline_events, new_room_key = await pagination_method(
-                room_id=room_id,
-                # The bounds are reversed so we can paginate backwards
-                # (from newer to older events) starting at `to_bound`.
-                # This ensures we fill the `limit` with the newest events first.
-                from_key=to_bound,
-                to_key=from_bound,
-                direction=Direction.BACKWARDS,
-                # We add one so we can determine if there are enough events to
-                # saturate the limit or not (see `limited`)
-                limit=room_sync_config.timeline_limit + 1,
-            )
-
-            # We want to return the events in ascending order (the last event is the
-            # most recent).
-            timeline_events.reverse()
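# Editor's note -- illustrative sketch only, not part of the diff: the
# "fetch limit + 1" trick above for detecting whether a backwards-paginated
# timeline is `limited`, demonstrated on plain lists.
from typing import List, Tuple

def take_newest(events: List[str], limit: int) -> Tuple[List[str], bool]:
    """Fetch limit + 1 newest events; the extra one only signals `limited`."""
    fetched = events[-(limit + 1) :]  # pretend this is the DB query
    limited = len(fetched) > limit
    if limited:
        # Drop the extra sentinel event; keep the `limit` newest ones
        fetched = fetched[-limit:]
    return fetched, limited

assert take_newest(["$e1", "$e2", "$e3"], limit=2) == (["$e2", "$e3"], True)
assert take_newest(["$e1", "$e2"], limit=2) == (["$e1", "$e2"], False)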
-            # Determine our `limited` status based on the timeline. We do this
-            # before filtering the events so we can accurately determine if there is
-            # more to paginate even if we filter out some/all events.
-            if len(timeline_events) > room_sync_config.timeline_limit:
-                limited = True
-                # Get rid of that extra "+ 1" event because we only used it to
-                # determine if we hit the limit or not
-                timeline_events = timeline_events[-room_sync_config.timeline_limit :]
-                assert timeline_events[0].internal_metadata.stream_ordering
-                new_room_key = RoomStreamToken(
-                    stream=timeline_events[0].internal_metadata.stream_ordering - 1
-                )
-
-            # Make sure we don't expose any events that the client shouldn't see
-            timeline_events = await filter_events_for_client(
-                self.storage_controllers,
-                user.to_string(),
-                timeline_events,
-                is_peeking=room_membership_for_user_at_to_token.membership
-                != Membership.JOIN,
-                filter_send_to_client=True,
-            )
-            # TODO: Filter out `EventTypes.CallInvite` in public rooms,
-            # see https://github.com/element-hq/synapse/issues/17359
-
-            # TODO: Handle timeline gaps (`get_timeline_gaps()`)
-
-            # Determine how many "live" events we have (events within the given
-            # token range).
-            #
-            # This is mostly useful to determine whether a given @mention event
-            # should make a noise or not. Clients cannot rely solely on the absence
-            # of `initial: true` to determine live events because if a room not in
-            # the sliding window bumps into the window because of an @mention it
-            # will have `initial: true` yet contain a single live event (with
-            # potentially other old events in the timeline)
-            num_live = 0
-            if from_token is not None:
-                for timeline_event in reversed(timeline_events):
-                    # These fields should be present for all persisted events
-                    assert timeline_event.internal_metadata.stream_ordering is not None
-                    assert timeline_event.internal_metadata.instance_name is not None
-
-                    persisted_position = PersistedEventPosition(
-                        instance_name=timeline_event.internal_metadata.instance_name,
-                        stream=timeline_event.internal_metadata.stream_ordering,
-                    )
-                    if persisted_position.persisted_after(
-                        from_token.stream_token.room_key
-                    ):
-                        num_live += 1
-                    else:
-                        # Since we're iterating over the timeline events in
-                        # reverse-chronological order, we can break once we hit an
-                        # event that's not live. In the future, we could potentially
-                        # optimize this more with a binary search (bisect).
-                        break
-
-            # If the timeline is `limited=True`, the client does not have all events
-            # necessary to calculate aggregations themselves.
-            if limited:
-                bundled_aggregations = (
-                    await self.relations_handler.get_bundled_aggregations(
-                        timeline_events, user.to_string()
-                    )
-                )
-
-            # Update the `prev_batch_token` to point to the position that allows us
-            # to keep paginating backwards from the oldest event we return in the
-            # timeline.
-            prev_batch_token = prev_batch_token.copy_and_replace(
-                StreamKeyType.ROOM, new_room_key
-            )
-
-        # Figure out any stripped state events for invite/knocks. This allows the
-        # potential joiner to identify the room.
-        stripped_state: List[JsonDict] = []
-        if room_membership_for_user_at_to_token.membership in (
-            Membership.INVITE,
-            Membership.KNOCK,
-        ):
-            # This should never happen. If someone is invited to/has knocked on a
-            # room, then there should be an event for it.
- assert room_membership_for_user_at_to_token.event_id is not None - - invite_or_knock_event = await self.store.get_event( - room_membership_for_user_at_to_token.event_id - ) - - stripped_state = [] - if invite_or_knock_event.membership == Membership.INVITE: - stripped_state.extend( - invite_or_knock_event.unsigned.get("invite_room_state", []) - ) - elif invite_or_knock_event.membership == Membership.KNOCK: - stripped_state.extend( - invite_or_knock_event.unsigned.get("knock_room_state", []) - ) - - stripped_state.append(strip_event(invite_or_knock_event)) - - # TODO: Handle state resets. For example, if we see - # `room_membership_for_user_at_to_token.event_id=None and - # room_membership_for_user_at_to_token.membership is not None`, we should - # indicate to the client that a state reset happened. Perhaps we should indicate - # this by setting `initial: True` and empty `required_state`. - - # Check whether the room has a name set - name_state_ids = await self.get_current_state_ids_at( - room_id=room_id, - room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, - state_filter=StateFilter.from_types([(EventTypes.Name, "")]), - to_token=to_token, - ) - name_event_id = name_state_ids.get((EventTypes.Name, "")) - - room_membership_summary: Mapping[str, MemberSummary] - empty_membership_summary = MemberSummary([], 0) - if room_membership_for_user_at_to_token.membership in ( - Membership.LEAVE, - Membership.BAN, - ): - # TODO: Figure out how to get the membership summary for left/banned rooms - room_membership_summary = {} - else: - room_membership_summary = await self.store.get_room_summary(room_id) - # TODO: Reverse/rewind back to the `to_token` - - # `heroes` are required if the room name is not set. - # - # Note: When you're the first one on your server to be invited to a new room - # over federation, we only have access to some stripped state in - # `event.unsigned.invite_room_state` which currently doesn't include `heroes`, - # see https://github.com/matrix-org/matrix-spec/issues/380. This means that - # clients won't be able to calculate the room name when necessary and just a - # pitfall we have to deal with until that spec issue is resolved. - hero_user_ids: List[str] = [] - # TODO: Should we also check for `EventTypes.CanonicalAlias` - # (`m.room.canonical_alias`) as a fallback for the room name? see - # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 - if name_event_id is None: - hero_user_ids = extract_heroes_from_room_summary( - room_membership_summary, me=user.to_string() - ) - - # Fetch the `required_state` for the room - # - # No `required_state` for invite/knock rooms (just `stripped_state`) - # - # FIXME: It would be nice to make the `rooms` response more uniform regardless - # of membership. Currently, we have to make this optional because - # `invite`/`knock` rooms only have `stripped_state`. 
See - # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 - # - # Calculate the `StateFilter` based on the `required_state` for the room - required_state_filter = StateFilter.none() - if room_membership_for_user_at_to_token.membership not in ( - Membership.INVITE, - Membership.KNOCK, - ): - # If we have a double wildcard ("*", "*") in the `required_state`, we need - # to fetch all state for the room - # - # Note: MSC3575 describes different behavior to how we're handling things - # here but since it's not wrong to return more state than requested - # (`required_state` is just the minimum requested), it doesn't matter if we - # include more than client wanted. This complexity is also under scrutiny, - # see - # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1185109050 - # - # > One unique exception is when you request all state events via ["*", "*"]. When used, - # > all state events are returned by default, and additional entries FILTER OUT the returned set - # > of state events. These additional entries cannot use '*' themselves. - # > For example, ["*", "*"], ["m.room.member", "@alice:example.com"] will _exclude_ every m.room.member - # > event _except_ for @alice:example.com, and include every other state event. - # > In addition, ["*", "*"], ["m.space.child", "*"] is an error, the m.space.child filter is not - # > required as it would have been returned anyway. - # > - # > -- MSC3575 (https://github.com/matrix-org/matrix-spec-proposals/pull/3575) - if StateValues.WILDCARD in room_sync_config.required_state_map.get( - StateValues.WILDCARD, set() - ): - set_tag( - SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard", - True, - ) - required_state_filter = StateFilter.all() - # TODO: `StateFilter` currently doesn't support wildcard event types. We're - # currently working around this by returning all state to the client but it - # would be nice to fetch less from the database and return just what the - # client wanted. - elif ( - room_sync_config.required_state_map.get(StateValues.WILDCARD) - is not None - ): - set_tag( - SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard_event_type", - True, - ) - required_state_filter = StateFilter.all() - else: - required_state_types: List[Tuple[str, Optional[str]]] = [] - for ( - state_type, - state_key_set, - ) in room_sync_config.required_state_map.items(): - num_wild_state_keys = 0 - lazy_load_room_members = False - num_others = 0 - for state_key in state_key_set: - if state_key == StateValues.WILDCARD: - num_wild_state_keys += 1 - # `None` is a wildcard in the `StateFilter` - required_state_types.append((state_type, None)) - # We need to fetch all relevant people when we're lazy-loading membership - elif ( - state_type == EventTypes.Member - and state_key == StateValues.LAZY - ): - lazy_load_room_members = True - # Everyone in the timeline is relevant - timeline_membership: Set[str] = set() - if timeline_events is not None: - for timeline_event in timeline_events: - timeline_membership.add(timeline_event.sender) - - for user_id in timeline_membership: - required_state_types.append( - (EventTypes.Member, user_id) - ) - - # FIXME: We probably also care about invite, ban, kick, targets, etc - # but the spec only mentions "senders". 
- elif state_key == StateValues.ME: - num_others += 1 - required_state_types.append((state_type, user.to_string())) - else: - num_others += 1 - required_state_types.append((state_type, state_key)) - - set_tag( - SynapseTags.FUNC_ARG_PREFIX - + "required_state_wildcard_state_key_count", - num_wild_state_keys, - ) - set_tag( - SynapseTags.FUNC_ARG_PREFIX + "required_state_lazy", - lazy_load_room_members, - ) - set_tag( - SynapseTags.FUNC_ARG_PREFIX + "required_state_other_count", - num_others, - ) - - required_state_filter = StateFilter.from_types(required_state_types) - - # We need this base set of info for the response so let's just fetch it along - # with the `required_state` for the room - meta_room_state = [(EventTypes.Name, ""), (EventTypes.RoomAvatar, "")] + [ - (EventTypes.Member, hero_user_id) for hero_user_id in hero_user_ids - ] - state_filter = StateFilter.all() - if required_state_filter != StateFilter.all(): - state_filter = StateFilter( - types=StateFilter.from_types( - chain(meta_room_state, required_state_filter.to_types()) - ).types, - include_others=required_state_filter.include_others, - ) - - # We can return all of the state that was requested if this was the first - # time we've sent the room down this connection. - room_state: StateMap[EventBase] = {} - if initial: - room_state = await self.get_current_state_at( - room_id=room_id, - room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, - state_filter=state_filter, - to_token=to_token, - ) - else: - assert from_bound is not None - - # TODO: Limit the number of state events we're about to send down - # the room, if its too many we should change this to an - # `initial=True`? - deltas = await self.store.get_current_state_deltas_for_room( - room_id=room_id, - from_token=from_bound, - to_token=to_token.room_key, - ) - # TODO: Filter room state before fetching events - # TODO: Handle state resets where event_id is None - events = await self.store.get_events( - [d.event_id for d in deltas if d.event_id] - ) - room_state = {(s.type, s.state_key): s for s in events.values()} - - required_room_state: StateMap[EventBase] = {} - if required_state_filter != StateFilter.none(): - required_room_state = required_state_filter.filter_state(room_state) - - # Find the room name and avatar from the state - room_name: Optional[str] = None - # TODO: Should we also check for `EventTypes.CanonicalAlias` - # (`m.room.canonical_alias`) as a fallback for the room name? 
see - # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 - name_event = room_state.get((EventTypes.Name, "")) - if name_event is not None: - room_name = name_event.content.get("name") - - room_avatar: Optional[str] = None - avatar_event = room_state.get((EventTypes.RoomAvatar, "")) - if avatar_event is not None: - room_avatar = avatar_event.content.get("url") - - # Assemble heroes: extract the info from the state we just fetched - heroes: List[SlidingSyncResult.RoomResult.StrippedHero] = [] - for hero_user_id in hero_user_ids: - member_event = room_state.get((EventTypes.Member, hero_user_id)) - if member_event is not None: - heroes.append( - SlidingSyncResult.RoomResult.StrippedHero( - user_id=hero_user_id, - display_name=member_event.content.get("displayname"), - avatar_url=member_event.content.get("avatar_url"), - ) - ) - - # Figure out the last bump event in the room - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, to_token.room_key, event_types=DEFAULT_BUMP_EVENT_TYPES - ) - ) - - # By default, just choose the membership event position - bump_stamp = room_membership_for_user_at_to_token.event_pos.stream - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream - - set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) - - return SlidingSyncResult.RoomResult( - name=room_name, - avatar=room_avatar, - heroes=heroes, - is_dm=room_membership_for_user_at_to_token.is_dm, - initial=initial, - required_state=list(required_room_state.values()), - timeline_events=timeline_events, - bundled_aggregations=bundled_aggregations, - stripped_state=stripped_state, - prev_batch=prev_batch_token, - limited=limited, - num_live=num_live, - bump_stamp=bump_stamp, - joined_count=room_membership_summary.get( - Membership.JOIN, empty_membership_summary - ).count, - invited_count=room_membership_summary.get( - Membership.INVITE, empty_membership_summary - ).count, - # TODO: These are just dummy values. We could potentially just remove these - # since notifications can only really be done correctly on the client anyway - # (encrypted rooms). - notification_count=0, - highlight_count=0, - ) - - @trace - async def get_extensions_response( - self, - sync_config: SlidingSyncConfig, - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> SlidingSyncResult.Extensions: - """Handle extension requests. - - Args: - sync_config: Sync configuration - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - actual_room_response_map: A map of room ID to room results in the the - Sliding Sync response. - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. 
- """ - - if sync_config.extensions is None: - return SlidingSyncResult.Extensions() - - to_device_response = None - if sync_config.extensions.to_device is not None: - to_device_response = await self.get_to_device_extension_response( - sync_config=sync_config, - to_device_request=sync_config.extensions.to_device, - to_token=to_token, - ) - - e2ee_response = None - if sync_config.extensions.e2ee is not None: - e2ee_response = await self.get_e2ee_extension_response( - sync_config=sync_config, - e2ee_request=sync_config.extensions.e2ee, - to_token=to_token, - from_token=from_token, - ) - - account_data_response = None - if sync_config.extensions.account_data is not None: - account_data_response = await self.get_account_data_extension_response( - sync_config=sync_config, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - account_data_request=sync_config.extensions.account_data, - to_token=to_token, - from_token=from_token, - ) - - receipts_response = None - if sync_config.extensions.receipts is not None: - receipts_response = await self.get_receipts_extension_response( - sync_config=sync_config, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - actual_room_response_map=actual_room_response_map, - receipts_request=sync_config.extensions.receipts, - to_token=to_token, - from_token=from_token, - ) - - typing_response = None - if sync_config.extensions.typing is not None: - typing_response = await self.get_typing_extension_response( - sync_config=sync_config, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - actual_room_response_map=actual_room_response_map, - typing_request=sync_config.extensions.typing, - to_token=to_token, - from_token=from_token, - ) - - return SlidingSyncResult.Extensions( - to_device=to_device_response, - e2ee=e2ee_response, - account_data=account_data_response, - receipts=receipts_response, - typing=typing_response, - ) - - def find_relevant_room_ids_for_extension( - self, - requested_lists: Optional[List[str]], - requested_room_ids: Optional[List[str]], - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - ) -> Set[str]: - """ - Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only - return results for rooms in the Sliding Sync response. This matches up the - requested rooms/lists with the actual lists/rooms in the Sliding Sync response. - - {"lists": []} // Do not process any lists. - {"lists": ["rooms", "dms"]} // Process only a subset of lists. - {"lists": ["*"]} // Process all lists defined in the Sliding Window API. (This is the default.) - - {"rooms": []} // Do not process any specific rooms. - {"rooms": ["!a:b", "!c:d"]} // Process only a subset of room subscriptions. - {"rooms": ["*"]} // Process all room subscriptions defined in the Room Subscription API. (This is the default.) - - Args: - requested_lists: The `lists` from the extension request. - requested_room_ids: The `rooms` from the extension request. - actual_lists: The actual lists from the Sliding Sync response. - actual_room_ids: The actual room subscriptions from the Sliding Sync request. - """ - - # We only want to include account data for rooms that are already in the sliding - # sync response AND that were requested in the account data request. 
- relevant_room_ids: Set[str] = set() - - # See what rooms from the room subscriptions we should get account data for - if requested_room_ids is not None: - for room_id in requested_room_ids: - # A wildcard means we process all rooms from the room subscriptions - if room_id == "*": - relevant_room_ids.update(actual_room_ids) - break - - if room_id in actual_room_ids: - relevant_room_ids.add(room_id) - - # See what rooms from the sliding window lists we should get account data for - if requested_lists is not None: - for list_key in requested_lists: - # Just some typing because we share the variable name in multiple places - actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None - - # A wildcard means we process rooms from all lists - if list_key == "*": - for actual_list in actual_lists.values(): - # We only expect a single SYNC operation for any list - assert len(actual_list.ops) == 1 - sync_op = actual_list.ops[0] - assert sync_op.op == OperationType.SYNC - - relevant_room_ids.update(sync_op.room_ids) - - break - - actual_list = actual_lists.get(list_key) - if actual_list is not None: - # We only expect a single SYNC operation for any list - assert len(actual_list.ops) == 1 - sync_op = actual_list.ops[0] - assert sync_op.op == OperationType.SYNC - - relevant_room_ids.update(sync_op.room_ids) - - return relevant_room_ids - - @trace - async def get_to_device_extension_response( - self, - sync_config: SlidingSyncConfig, - to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, - to_token: StreamToken, - ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: - """Handle to-device extension (MSC3885) - - Args: - sync_config: Sync configuration - to_device_request: The to-device extension from the request - to_token: The point in the stream to sync up to. - """ - user_id = sync_config.user.to_string() - device_id = sync_config.requester.device_id - - # Skip if the extension is not enabled - if not to_device_request.enabled: - return None - - # Check that this request has a valid device ID (not all requests have - # to belong to a device, and so device_id is None) - if device_id is None: - return SlidingSyncResult.Extensions.ToDeviceExtension( - next_batch=f"{to_token.to_device_key}", - events=[], - ) - - since_stream_id = 0 - if to_device_request.since is not None: - # We've already validated this is an int. - since_stream_id = int(to_device_request.since) - - if to_token.to_device_key < since_stream_id: - # The since token is ahead of our current token, so we return an - # empty response. - logger.warning( - "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r", - since_stream_id, - to_token.to_device_key, - ) - return SlidingSyncResult.Extensions.ToDeviceExtension( - next_batch=to_device_request.since, - events=[], - ) - - # Delete everything before the given since token, as we know the - # device must have received them. 
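The delete-then-fetch flow around the to-device `since` token (which doubles as an acknowledgement) looks roughly like this in miniature. An in-memory `FakeInbox` stands in for the device-inbox store; all names here are illustrative, not Synapse's storage API:

```python
from typing import List, Tuple

class FakeInbox:
    def __init__(self) -> None:
        self.messages: List[Tuple[int, str]] = []  # (stream_id, payload)

    def delete_up_to(self, stream_id: int) -> int:
        """The client ACKed everything up to `stream_id`, so drop it."""
        before = len(self.messages)
        self.messages = [m for m in self.messages if m[0] > stream_id]
        return before - len(self.messages)

    def fetch(self, since: int, to: int, limit: int) -> List[Tuple[int, str]]:
        """Return messages in the (since, to] range, up to `limit`."""
        return [m for m in self.messages if since < m[0] <= to][:limit]

inbox = FakeInbox()
inbox.messages = [(1, "a"), (2, "b"), (3, "c")]
assert inbox.delete_up_to(1) == 1          # `since` token ACKed stream_id 1
assert inbox.fetch(1, 3, 100) == [(2, "b"), (3, "c")]
```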
- deleted = await self.store.delete_messages_for_device( - user_id=user_id, - device_id=device_id, - up_to_stream_id=since_stream_id, - ) - - logger.debug( - "Deleted %d to-device messages up to %d for %s", - deleted, - since_stream_id, - user_id, - ) - - messages, stream_id = await self.store.get_messages_for_device( - user_id=user_id, - device_id=device_id, - from_stream_id=since_stream_id, - to_stream_id=to_token.to_device_key, - limit=min(to_device_request.limit, 100), # Limit to at most 100 events - ) - - return SlidingSyncResult.Extensions.ToDeviceExtension( - next_batch=f"{stream_id}", - events=messages, - ) - - @trace - async def get_e2ee_extension_response( - self, - sync_config: SlidingSyncConfig, - e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: - """Handle E2EE device extension (MSC3884) - - Args: - sync_config: Sync configuration - e2ee_request: The e2ee extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. - """ - user_id = sync_config.user.to_string() - device_id = sync_config.requester.device_id - - # Skip if the extension is not enabled - if not e2ee_request.enabled: - return None - - device_list_updates: Optional[DeviceListUpdates] = None - if from_token is not None: - # TODO: This should take into account the `from_token` and `to_token` - device_list_updates = await self.device_handler.get_user_ids_changed( - user_id=user_id, - from_token=from_token.stream_token, - ) - - device_one_time_keys_count: Mapping[str, int] = {} - device_unused_fallback_key_types: Sequence[str] = [] - if device_id: - # TODO: We should have a way to let clients differentiate between the states of: - # * no change in OTK count since the provided since token - # * the server has zero OTKs left for this device - # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 - device_one_time_keys_count = await self.store.count_e2e_one_time_keys( - user_id, device_id - ) - device_unused_fallback_key_types = ( - await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) - ) - - return SlidingSyncResult.Extensions.E2eeExtension( - device_list_updates=device_list_updates, - device_one_time_keys_count=device_one_time_keys_count, - device_unused_fallback_key_types=device_unused_fallback_key_types, - ) - - @trace - async def get_account_data_extension_response( - self, - sync_config: SlidingSyncConfig, - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]: - """Handle Account Data extension (MSC3959) - - Args: - sync_config: Sync configuration - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - account_data_request: The account_data extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. 
- """ - user_id = sync_config.user.to_string() - - # Skip if the extension is not enabled - if not account_data_request.enabled: - return None - - global_account_data_map: Mapping[str, JsonMapping] = {} - if from_token is not None: - # TODO: This should take into account the `from_token` and `to_token` - global_account_data_map = ( - await self.store.get_updated_global_account_data_for_user( - user_id, from_token.stream_token.account_data_key - ) - ) - - have_push_rules_changed = await self.store.have_push_rules_changed_for_user( - user_id, from_token.stream_token.push_rules_key - ) - if have_push_rules_changed: - global_account_data_map = dict(global_account_data_map) - # TODO: This should take into account the `from_token` and `to_token` - global_account_data_map[AccountDataTypes.PUSH_RULES] = ( - await self.push_rules_handler.push_rules_for_user(sync_config.user) - ) - else: - # TODO: This should take into account the `to_token` - all_global_account_data = await self.store.get_global_account_data_for_user( - user_id - ) - - global_account_data_map = dict(all_global_account_data) - # TODO: This should take into account the `to_token` - global_account_data_map[AccountDataTypes.PUSH_RULES] = ( - await self.push_rules_handler.push_rules_for_user(sync_config.user) - ) - - # Fetch room account data - account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] = {} - relevant_room_ids = self.find_relevant_room_ids_for_extension( - requested_lists=account_data_request.lists, - requested_room_ids=account_data_request.rooms, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - ) - if len(relevant_room_ids) > 0: - if from_token is not None: - # TODO: This should take into account the `from_token` and `to_token` - account_data_by_room_map = ( - await self.store.get_updated_room_account_data_for_user( - user_id, from_token.stream_token.account_data_key - ) - ) - else: - # TODO: This should take into account the `to_token` - account_data_by_room_map = ( - await self.store.get_room_account_data_for_user(user_id) - ) - - # Filter down to the relevant rooms - account_data_by_room_map = { - room_id: account_data_map - for room_id, account_data_map in account_data_by_room_map.items() - if room_id in relevant_room_ids - } - - return SlidingSyncResult.Extensions.AccountDataExtension( - global_account_data_map=global_account_data_map, - account_data_by_room_map=account_data_by_room_map, - ) - - async def get_receipts_extension_response( - self, - sync_config: SlidingSyncConfig, - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], - receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]: - """Handle Receipts extension (MSC3960) - - Args: - sync_config: Sync configuration - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - actual_room_response_map: A map of room ID to room results in the the - Sliding Sync response. - account_data_request: The account_data extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. 
- """ - # Skip if the extension is not enabled - if not receipts_request.enabled: - return None - - relevant_room_ids = self.find_relevant_room_ids_for_extension( - requested_lists=receipts_request.lists, - requested_room_ids=receipts_request.rooms, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - ) - - room_id_to_receipt_map: Dict[str, JsonMapping] = {} - if len(relevant_room_ids) > 0: - # TODO: Take connection tracking into account so that when a room comes back - # into range we can send the receipts that were missed. - receipt_source = self.event_sources.sources.receipt - receipts, _ = await receipt_source.get_new_events( - user=sync_config.user, - from_key=( - from_token.stream_token.receipt_key - if from_token - else MultiWriterStreamToken(stream=0) - ), - to_key=to_token.receipt_key, - # This is a dummy value and isn't used in the function - limit=0, - room_ids=relevant_room_ids, - is_guest=False, - ) - - for receipt in receipts: - # These fields should exist for every receipt - room_id = receipt["room_id"] - type = receipt["type"] - content = receipt["content"] - - # For `inital: True` rooms, we only want to include receipts for events - # in the timeline. - room_result = actual_room_response_map.get(room_id) - if room_result is not None: - if room_result.initial: - # TODO: In the future, it would be good to fetch less receipts - # out of the database in the first place but we would need to - # add a new `event_id` index to `receipts_linearized`. - relevant_event_ids = [ - event.event_id for event in room_result.timeline_events - ] - - assert isinstance(content, dict) - content = { - event_id: content_value - for event_id, content_value in content.items() - if event_id in relevant_event_ids - } - - room_id_to_receipt_map[room_id] = {"type": type, "content": content} - - return SlidingSyncResult.Extensions.ReceiptsExtension( - room_id_to_receipt_map=room_id_to_receipt_map, - ) - - async def get_typing_extension_response( - self, - sync_config: SlidingSyncConfig, - actual_lists: Dict[str, SlidingSyncResult.SlidingWindowList], - actual_room_ids: Set[str], - actual_room_response_map: Dict[str, SlidingSyncResult.RoomResult], - typing_request: SlidingSyncConfig.Extensions.TypingExtension, - to_token: StreamToken, - from_token: Optional[SlidingSyncStreamToken], - ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]: - """Handle Typing Notification extension (MSC3961) - - Args: - sync_config: Sync configuration - actual_lists: Sliding window API. A map of list key to list results in the - Sliding Sync response. - actual_room_ids: The actual room IDs in the the Sliding Sync response. - actual_room_response_map: A map of room ID to room results in the the - Sliding Sync response. - account_data_request: The account_data extension from the request - to_token: The point in the stream to sync up to. - from_token: The point in the stream to sync from. - """ - # Skip if the extension is not enabled - if not typing_request.enabled: - return None - - relevant_room_ids = self.find_relevant_room_ids_for_extension( - requested_lists=typing_request.lists, - requested_room_ids=typing_request.rooms, - actual_lists=actual_lists, - actual_room_ids=actual_room_ids, - ) - - room_id_to_typing_map: Dict[str, JsonMapping] = {} - if len(relevant_room_ids) > 0: - # Note: We don't need to take connection tracking into account for typing - # notifications because they'll get anything still relevant and hasn't timed - # out when the room comes into range. 
We consider the gap where the room - # fell out of range, as long enough for any typing notifications to have - # timed out (it's not worth the 30 seconds of data we may have missed). - typing_source = self.event_sources.sources.typing - typing_notifications, _ = await typing_source.get_new_events( - user=sync_config.user, - from_key=(from_token.stream_token.typing_key if from_token else 0), - to_key=to_token.typing_key, - # This is a dummy value and isn't used in the function - limit=0, - room_ids=relevant_room_ids, - is_guest=False, - ) - - for typing_notification in typing_notifications: - # These fields should exist for every typing notification - room_id = typing_notification["room_id"] - type = typing_notification["type"] - content = typing_notification["content"] - - room_id_to_typing_map[room_id] = {"type": type, "content": content} - - return SlidingSyncResult.Extensions.TypingExtension( - room_id_to_typing_map=room_id_to_typing_map, - ) - - -class HaveSentRoomFlag(Enum): - """Flag for whether we have sent the room down a sliding sync connection. - - The valid state changes here are: - NEVER -> LIVE - LIVE -> PREVIOUSLY - PREVIOUSLY -> LIVE - """ - - # The room has never been sent down (or we have forgotten we have sent it - # down). - NEVER = 1 - - # We have previously sent the room down, but there are updates that we - # haven't sent down. - PREVIOUSLY = 2 - - # We have sent the room down and the client has received all updates. - LIVE = 3 - - -@attr.s(auto_attribs=True, slots=True, frozen=True) -class HaveSentRoom: - """Whether we have sent the room down a sliding sync connection. - - Attributes: - status: Flag of if we have or haven't sent down the room - last_token: If the flag is `PREVIOUSLY` then this is non-null and - contains the last stream token of the last updates we sent down - the room, i.e. we still need to send everything since then to the - client. - """ - - status: HaveSentRoomFlag - last_token: Optional[RoomStreamToken] - - @staticmethod - def previously(last_token: RoomStreamToken) -> "HaveSentRoom": - """Constructor for `PREVIOUSLY` flag.""" - return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token) - - -HAVE_SENT_ROOM_NEVER = HaveSentRoom(HaveSentRoomFlag.NEVER, None) -HAVE_SENT_ROOM_LIVE = HaveSentRoom(HaveSentRoomFlag.LIVE, None) - - -@attr.s(auto_attribs=True) -class SlidingSyncConnectionStore: - """In-memory store of per-connection state, including what rooms we have - previously sent down a sliding sync connection. - - Note: This is NOT safe to run in a worker setup because connection positions will - point to different sets of rooms on different workers. e.g. for the same connection, - a connection position of 5 might have totally different states on worker A and - worker B. - - One complication that we need to deal with here is needing to handle requests being - resent, i.e. if we sent down a room in a response that the client received, we must - consider the room *not* sent when we get the request again. - - This is handled by using an integer "token", which is returned to the client - as part of the sync token. For each connection we store a mapping from - tokens to the room states, and create a new entry when we send down new - rooms. - - Note that for any given sliding sync connection we will only store a maximum - of two different tokens: the previous token from the request and a new token - sent in the response. When we receive a request with a given token, we then - clear out all other entries with a different token. 
- - Attributes: - _connections: Mapping from `(user_id, conn_id)` to mapping of `token` - to mapping of room ID to `HaveSentRoom`. - """ - - # `(user_id, conn_id)` -> `token` -> `room_id` -> `HaveSentRoom` - _connections: Dict[Tuple[str, str], Dict[int, Dict[str, HaveSentRoom]]] = ( - attr.Factory(dict) - ) - - async def is_valid_token( - self, sync_config: SlidingSyncConfig, connection_token: int - ) -> bool: - """Return whether the connection token is valid/recognized""" - if connection_token == 0: - return True - - conn_key = self._get_connection_key(sync_config) - return connection_token in self._connections.get(conn_key, {}) - - async def have_sent_room( - self, sync_config: SlidingSyncConfig, connection_token: int, room_id: str - ) -> HaveSentRoom: - """For the given user_id/conn_id/token, return whether we have - previously sent the room down - """ - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.setdefault(conn_key, {}) - room_status = sync_statuses.get(connection_token, {}).get( - room_id, HAVE_SENT_ROOM_NEVER - ) - - return room_status - - @trace - async def record_rooms( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - *, - sent_room_ids: StrCollection, - unsent_room_ids: StrCollection, - ) -> int: - """Record which rooms we have/haven't sent down in a new response - - Args: - sync_config - from_token: The since token from the request, if any - sent_room_ids: The set of room IDs that we have sent down as - part of this request (only needs to be ones we didn't - previously send down). - unsent_room_ids: The set of room IDs that have had updates - since the `from_token`, but which were not included in - this request - """ - prev_connection_token = 0 - if from_token is not None: - prev_connection_token = from_token.connection_position - - # If there are no changes then this is a noop. - if not sent_room_ids and not unsent_room_ids: - return prev_connection_token - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.setdefault(conn_key, {}) - - # Generate a new token, removing any existing entries in that token - # (which can happen if requests get resent). - new_store_token = prev_connection_token + 1 - sync_statuses.pop(new_store_token, None) - - # Copy over and update the room mappings. - new_room_statuses = dict(sync_statuses.get(prev_connection_token, {})) - - # Whether we have updated `new_room_statuses`; if we haven't by the - # end, we can treat this as a noop. - have_updated = False - for room_id in sent_room_ids: - new_room_statuses[room_id] = HAVE_SENT_ROOM_LIVE - have_updated = True - - # Whether we add/update the entries for unsent rooms depends on the - # existing entry: - # - LIVE: We have previously sent down everything up to - # `last_room_token`, so we update the entry to be `PREVIOUSLY` with - # `last_room_token`. - # - PREVIOUSLY: We have previously sent down everything up to *a* - # given token, so we don't need to update the entry. - # - NEVER: We have never previously sent down the room, and we haven't - # sent anything down this time either so we leave it as NEVER. - - # Work out the new state for unsent rooms that were `LIVE`.
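The LIVE/PREVIOUSLY/NEVER bookkeeping spelled out in the comment above amounts to a single conditional per unsent room. A toy model, assuming integer stream tokens and tuples in place of `HaveSentRoom`, and covering only the case where a `from_token` is present:

```python
from enum import Enum
from typing import Dict, Optional, Set, Tuple

class Flag(Enum):
    PREVIOUSLY = 1
    LIVE = 2

def mark_unsent(
    statuses: Dict[str, Tuple[Flag, Optional[int]]],
    unsent_room_ids: Set[str],
    from_token: int,
) -> None:
    for room_id in unsent_room_ids:
        status = statuses.get(room_id)
        if status is not None and status[0] == Flag.LIVE:
            # Only LIVE rooms get downgraded; PREVIOUSLY keeps its older
            # token and NEVER (absent) stays absent.
            statuses[room_id] = (Flag.PREVIOUSLY, from_token)

statuses = {"!a:x": (Flag.LIVE, None), "!b:x": (Flag.PREVIOUSLY, 3)}
mark_unsent(statuses, {"!a:x", "!b:x", "!c:x"}, from_token=7)
assert statuses == {"!a:x": (Flag.PREVIOUSLY, 7), "!b:x": (Flag.PREVIOUSLY, 3)}
```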
- if from_token: - new_unsent_state = HaveSentRoom.previously(from_token.stream_token.room_key) - else: - new_unsent_state = HAVE_SENT_ROOM_NEVER - - for room_id in unsent_room_ids: - prev_state = new_room_statuses.get(room_id) - if prev_state is not None and prev_state.status == HaveSentRoomFlag.LIVE: - new_room_statuses[room_id] = new_unsent_state - have_updated = True - - if not have_updated: - return prev_connection_token - - sync_statuses[new_store_token] = new_room_statuses - - return new_store_token - - @trace - async def mark_token_seen( - self, - sync_config: SlidingSyncConfig, - from_token: Optional[SlidingSyncStreamToken], - ) -> None: - """We have received a request with the given token, so we can clear out - any other tokens associated with the connection. - - If there is no from token then we have started afresh, and so we delete - all tokens associated with the device. - """ - # Clear out any tokens for the connection that don't match the one - # from the request. - - conn_key = self._get_connection_key(sync_config) - sync_statuses = self._connections.pop(conn_key, {}) - if from_token is None: - return - - sync_statuses = { - connection_token: room_statuses - for connection_token, room_statuses in sync_statuses.items() - if connection_token == from_token.connection_position - } - if sync_statuses: - self._connections[conn_key] = sync_statuses - - @staticmethod - def _get_connection_key(sync_config: SlidingSyncConfig) -> Tuple[str, str]: - """Return a unique identifier for this connection. - - The first part is simply the user ID. - - The second part is generally a combination of device ID and conn_id. - However, both of these are optional (e.g. puppet access tokens don't - have device IDs), so this handles those edge cases. - - We use this over the raw `conn_id` to avoid clashes between different - clients that use the same `conn_id`. Imagine a user uses a web client - that uses `conn_id: main_sync_loop` and an Android client that also has - a `conn_id: main_sync_loop`. - """ - - user_id = sync_config.user.to_string() - - # Only one sliding sync connection is allowed per given conn_id (empty - # or not). - conn_id = sync_config.conn_id or "" - - if sync_config.requester.device_id: - return (user_id, f"D/{sync_config.requester.device_id}/{conn_id}") - - if sync_config.requester.access_token_id: - # If we don't have a device, then the access token ID should be a - # stable ID. - return (user_id, f"A/{sync_config.requester.access_token_id}/{conn_id}") - - # If we have neither then it's likely an AS or some weird token. Either - # way we can just fail here. - raise Exception("Cannot use sliding sync with access token type") diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py new file mode 100644
index 0000000000..cb56eb53fc --- /dev/null +++ b/synapse/handlers/sliding_sync/__init__.py
@@ -0,0 +1,1691 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import itertools +import logging +from itertools import chain +from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple + +from prometheus_client import Histogram +from typing_extensions import assert_never + +from synapse.api.constants import Direction, EventTypes, Membership +from synapse.events import EventBase +from synapse.events.utils import strip_event +from synapse.handlers.relations import BundledAggregations +from synapse.handlers.sliding_sync.extensions import SlidingSyncExtensionHandler +from synapse.handlers.sliding_sync.room_lists import ( + RoomsForUserType, + SlidingSyncRoomLists, +) +from synapse.handlers.sliding_sync.store import SlidingSyncConnectionStore +from synapse.logging.opentracing import ( + SynapseTags, + log_kv, + set_tag, + start_active_span, + tag_args, + trace, +) +from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary +from synapse.storage.databases.main.state_deltas import StateDelta +from synapse.storage.databases.main.stream import PaginateFunction +from synapse.storage.roommember import ( + MemberSummary, +) +from synapse.types import ( + JsonDict, + MutableStateMap, + PersistedEventPosition, + Requester, + RoomStreamToken, + SlidingSyncStreamToken, + StateMap, + StrCollection, + StreamKeyType, + StreamToken, +) +from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomSyncConfig, + SlidingSyncConfig, + SlidingSyncResult, + StateValues, +) +from synapse.types.state import StateFilter +from synapse.util.async_helpers import concurrently_execute +from synapse.visibility import filter_events_for_client + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +sync_processing_time = Histogram( + "synapse_sliding_sync_processing_time", + "Time taken to generate a sliding sync response, ignoring wait times.", + ["initial"], +) + +# Limit the number of state_keys we should remember sending down the connection for each +# (room_id, user_id). We don't want to store and pull out too much data in the database. +# +# 100 is an arbitrary but small-ish number. The idea is that we probably won't send down +# too many redundant member state events (that the client already knows about) for a +# given ongoing conversation if we keep 100 around. Most rooms don't have 100 members +# anyway and it takes a while to cycle through 100 members. 
+MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100 + + +class SlidingSyncHandler: + def __init__(self, hs: "HomeServer"): + self.clock = hs.get_clock() + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + self.auth_blocking = hs.get_auth_blocking() + self.notifier = hs.get_notifier() + self.event_sources = hs.get_event_sources() + self.relations_handler = hs.get_relations_handler() + self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync + self.is_mine_id = hs.is_mine_id + + self.connection_store = SlidingSyncConnectionStore(self.store) + self.extensions = SlidingSyncExtensionHandler(hs) + self.room_lists = SlidingSyncRoomLists(hs) + + async def wait_for_sync_for_user( + self, + requester: Requester, + sync_config: SlidingSyncConfig, + from_token: Optional[SlidingSyncStreamToken] = None, + timeout_ms: int = 0, + ) -> SlidingSyncResult: + """ + Get the sync for a client if we have new data for it now. Otherwise + wait for new data to arrive on the server. If the timeout expires, then + return an empty sync result. + + Args: + requester: The user making the request + sync_config: Sync configuration + from_token: The point in the stream to sync from. Token of the end of the + previous batch. May be `None` if this is the initial sync request. + timeout_ms: The time in milliseconds to wait for new data to arrive. If 0, + we will respond immediately, but there might not be any new data, so we + just return an empty response. + """ + # If the user is not part of the mau group, then check that limits have + # not been exceeded (if not part of the group by this point, it is almost + # certain that auth_blocking will occur) + await self.auth_blocking.check_auth_blocking(requester=requester) + + # If we're working with a user-provided token, we need to make sure to wait for + # this worker to catch up with the token so we don't skip past any incoming + # events or future events if the user is nefariously modifying the token by + # hand. + if from_token is not None: + # We need to make sure this worker has caught up with the token. If + # this returns false, it means we timed out waiting, and we should + # just return an empty response. + before_wait_ts = self.clock.time_msec() + if not await self.notifier.wait_for_stream_token(from_token.stream_token): + logger.warning( + "Timed out waiting for worker to catch up. Returning empty response" + ) + return SlidingSyncResult.empty(from_token) + + # If we've spent significant time waiting to catch up, take it off + # the timeout. + after_wait_ts = self.clock.time_msec() + if after_wait_ts - before_wait_ts > 1_000: + timeout_ms -= after_wait_ts - before_wait_ts + timeout_ms = max(timeout_ms, 0) + + # We're going to respond immediately if the timeout is 0 or if this is an + # initial sync (without a `from_token`) so we can avoid calling + # `notifier.wait_for_events()`. + if timeout_ms == 0 or from_token is None: + now_token = self.event_sources.get_current_token() + result = await self.current_sync_for_user( + sync_config, + from_token=from_token, + to_token=now_token, + ) + else: + # Otherwise, we wait for something to happen and report it to the user.
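The timeout accounting in the branch above (deduct significant catch-up waits from the client's long-poll budget, clamped at zero) is easy to state on its own. A sketch with plain integers standing in for the clock; the function name is illustrative:

```python
def remaining_timeout_ms(timeout_ms: int, before_wait_ts: int, after_wait_ts: int) -> int:
    """Deduct time spent waiting for the worker to catch up from the timeout."""
    waited = after_wait_ts - before_wait_ts
    if waited > 1_000:  # only deduct significant waits, mirroring the check above
        timeout_ms = max(timeout_ms - waited, 0)
    return timeout_ms

assert remaining_timeout_ms(30_000, 0, 5_000) == 25_000
assert remaining_timeout_ms(3_000, 0, 5_000) == 0
assert remaining_timeout_ms(30_000, 0, 500) == 30_000
```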
+ async def current_sync_callback( + before_token: StreamToken, after_token: StreamToken + ) -> SlidingSyncResult: + return await self.current_sync_for_user( + sync_config, + from_token=from_token, + to_token=after_token, + ) + + result = await self.notifier.wait_for_events( + sync_config.user.to_string(), + timeout_ms, + current_sync_callback, + from_token=from_token.stream_token, + ) + + return result + + @trace + async def current_sync_for_user( + self, + sync_config: SlidingSyncConfig, + to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken] = None, + ) -> SlidingSyncResult: + """ + Generates the response body of a Sliding Sync result, represented as a + `SlidingSyncResult`. + + We fetch data according to the token range (> `from_token` and <= `to_token`). + + Args: + sync_config: Sync configuration + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. Token of the end of the + previous batch. May be `None` if this is the initial sync request. + """ + start_time_s = self.clock.time() + + user_id = sync_config.user.to_string() + app_service = self.store.get_app_service_by_user_id(user_id) + if app_service: + # We no longer support AS users using /sync directly. + # See https://github.com/matrix-org/matrix-doc/issues/1144 + raise NotImplementedError() + + # Get the per-connection state (if any). + # + # Raises an exception if there is a `connection_position` that we don't + # recognize. If we don't do this and the client asks for the full range + # of rooms, we end up sending down all rooms and their state from + # scratch (which can be very slow). By expiring the connection we allow + # the client a chance to do an initial request with a smaller range of + # rooms to get them some results sooner but will end up taking the same + # amount of time (more with round-trips and re-processing) in the end to + # get everything again. 
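The "expire unrecognized connection positions" policy described in the comment above can be modelled as a lookup that fails loudly, pushing the client back to a position-0 (initial) request. Everything here, including the `UnknownPosition` exception, is a stand-in rather than Synapse's actual types:

```python
from typing import Dict

class UnknownPosition(Exception):
    pass

def get_connection_state(states: Dict[int, dict], position: int) -> dict:
    if position == 0:
        return {}  # fresh connection: nothing has been sent yet
    try:
        return states[position]
    except KeyError:
        # Force the client to restart rather than silently resending
        # every room and its state from scratch.
        raise UnknownPosition(position)

states = {7: {"!a:x": "LIVE"}}
assert get_connection_state(states, 7) == {"!a:x": "LIVE"}
assert get_connection_state(states, 0) == {}
```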
+ previous_connection_state = ( + await self.connection_store.get_and_clear_connection_positions( + sync_config, from_token + ) + ) + + # Get all of the room IDs that the user should be able to see in the sync + # response + has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 + has_room_subscriptions = ( + sync_config.room_subscriptions is not None + and len(sync_config.room_subscriptions) > 0 + ) + + interested_rooms = await self.room_lists.compute_interested_rooms( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + from_token=from_token.stream_token if from_token else None, + to_token=to_token, + ) + + lists = interested_rooms.lists + relevant_room_map = interested_rooms.relevant_room_map + all_rooms = interested_rooms.all_rooms + room_membership_for_user_map = interested_rooms.room_membership_for_user_map + relevant_rooms_to_send_map = interested_rooms.relevant_rooms_to_send_map + + # Fetch room data + rooms: Dict[str, SlidingSyncResult.RoomResult] = {} + + new_connection_state = previous_connection_state.get_mutable() + + @trace + @tag_args + async def handle_room(room_id: str) -> None: + room_sync_result = await self.get_room_sync_data( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, + room_id=room_id, + room_sync_config=relevant_rooms_to_send_map[room_id], + room_membership_for_user_at_to_token=room_membership_for_user_map[ + room_id + ], + from_token=from_token, + to_token=to_token, + newly_joined=room_id in interested_rooms.newly_joined_rooms, + newly_left=room_id in interested_rooms.newly_left_rooms, + is_dm=room_id in interested_rooms.dm_room_ids, + ) + + # Filter out empty room results during incremental sync + if room_sync_result or not from_token: + rooms[room_id] = room_sync_result + + if relevant_rooms_to_send_map: + with start_active_span("sliding_sync.generate_room_entries"): + await concurrently_execute(handle_room, relevant_rooms_to_send_map, 20) + + extensions = await self.extensions.get_extensions_response( + sync_config=sync_config, + actual_lists=lists, + previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, + # We're purposely using `relevant_room_map` instead of + # `relevant_rooms_to_send_map` here. This needs to be all room_ids we could + # send regardless of whether they have an event update or not. The + # extensions care about more than just normal events in the rooms (like + # account data, read receipts, typing indicators, to-device messages, etc). + actual_room_ids=set(relevant_room_map.keys()), + actual_room_response_map=rooms, + from_token=from_token, + to_token=to_token, + ) + + if has_lists or has_room_subscriptions: + # We now calculate if any rooms outside the range have had updates, + # which we are not sending down. + # + # We *must* record rooms that have had updates, but it is also fine + # to record rooms as having updates even if there might not actually + # be anything new for the user (e.g. due to event filters, events + # having happened after the user left, etc). + if from_token: + # The set of rooms that the client (may) care about, but aren't + # in any list range (or subscribed to). + missing_rooms = all_rooms - relevant_room_map.keys() + + # We now just go and try fetching any events in the above rooms + # to see if anything has happened since the `from_token`. + # + # TODO: Replace this with something faster. 
When we land the + # sliding sync tables that record the most recent event + # positions we can use that. + unsent_room_ids: StrCollection + if await self.store.have_finished_sliding_sync_background_jobs(): + unsent_room_ids = await ( + self.store.get_rooms_that_have_updates_since_sliding_sync_table( + room_ids=missing_rooms, + from_key=from_token.stream_token.room_key, + ) + ) + else: + missing_event_map_by_room = ( + await self.store.get_room_events_stream_for_rooms( + room_ids=missing_rooms, + from_key=to_token.room_key, + to_key=from_token.stream_token.room_key, + limit=1, + ) + ) + unsent_room_ids = list(missing_event_map_by_room) + + new_connection_state.rooms.record_unsent_rooms( + unsent_room_ids, from_token.stream_token.room_key + ) + + new_connection_state.rooms.record_sent_rooms( + relevant_rooms_to_send_map.keys() + ) + + connection_position = await self.connection_store.record_new_state( + sync_config=sync_config, + from_token=from_token, + new_connection_state=new_connection_state, + ) + elif from_token: + connection_position = from_token.connection_position + else: + # Initial sync without a `from_token` starts at `0` + connection_position = 0 + + sliding_sync_result = SlidingSyncResult( + next_pos=SlidingSyncStreamToken(to_token, connection_position), + lists=lists, + rooms=rooms, + extensions=extensions, + ) + + # Make it easy to find traces for syncs that aren't empty + set_tag(SynapseTags.RESULT_PREFIX + "result", bool(sliding_sync_result)) + set_tag(SynapseTags.FUNC_ARG_PREFIX + "sync_config.user", user_id) + + end_time_s = self.clock.time() + sync_processing_time.labels(from_token is not None).observe( + end_time_s - start_time_s + ) + + return sliding_sync_result + + @trace + async def get_current_state_ids_at( + self, + room_id: str, + room_membership_for_user_at_to_token: RoomsForUserType, + state_filter: StateFilter, + to_token: StreamToken, + ) -> StateMap[str]: + """ + Get current state IDs for the user in the room according to their membership. This + will be the current state at the time of their LEAVE/BAN, otherwise will be the + current state <= to_token. + + Args: + room_id: The room ID to fetch data for + room_membership_for_user_at_token: Membership information for the user + in the room at the time of `to_token`. + to_token: The point in the stream to sync up to. + """ + state_ids: StateMap[str] + # People shouldn't see past their leave/ban event + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + # TODO: `get_state_ids_at(...)` doesn't take into account the "current + # state". Maybe we need to use + # `get_forward_extremities_for_room_at_stream_ordering(...)` to "Fetch the + # current state at the time." + state_ids = await self.storage_controllers.state.get_state_ids_at( + room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + room_membership_for_user_at_to_token.event_pos.to_room_stream_token(), + ), + state_filter=state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. 
+ await_full_state=False, + ) + # Otherwise, we can get the latest current state in the room + else: + state_ids = await self.storage_controllers.state.get_current_state_ids( + room_id, + state_filter, + # Partially-stated rooms should have all state events except for + # remote membership events. Since we've already excluded + # partially-stated rooms unless `required_state` only has + # `["m.room.member", "$LAZY"]` for membership, we should be able to + # retrieve everything requested. When we're lazy-loading, if there + # are some remote senders in the timeline, we should also have their + # membership event because we had to auth that timeline event. Plus + # we don't want to block the whole sync waiting for this one room. + await_full_state=False, + ) + # TODO: Query `current_state_delta_stream` and reverse/rewind back to the `to_token` + + return state_ids + + @trace + async def get_current_state_at( + self, + room_id: str, + room_membership_for_user_at_to_token: RoomsForUserType, + state_filter: StateFilter, + to_token: StreamToken, + ) -> StateMap[EventBase]: + """ + Get current state for the user in the room according to their membership. This + will be the current state at the time of their LEAVE/BAN, otherwise will be the + current state <= to_token. + + Args: + room_id: The room ID to fetch data for + room_membership_for_user_at_token: Membership information for the user + in the room at the time of `to_token`. + to_token: The point in the stream to sync up to. + """ + state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=state_filter, + to_token=to_token, + ) + + events = await self.store.get_events_as_list(list(state_ids.values())) + + state_map = {} + for event in events: + state_map[(event.type, event.state_key)] = event + + return state_map + + @trace + async def get_current_state_deltas_for_room( + self, + room_id: str, + room_membership_for_user_at_to_token: RoomsForUserType, + from_token: RoomStreamToken, + to_token: RoomStreamToken, + ) -> List[StateDelta]: + """ + Get the state deltas between two tokens taking into account the user's + membership. If the user is LEAVE/BAN, we will only get the state deltas up to + their LEAVE/BAN event (inclusive). + + (> `from_token` and <= `to_token`) + """ + membership = room_membership_for_user_at_to_token.membership + # We don't know how to handle `membership` values other than these. The + # code below would need to be updated. + assert membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.KNOCK, + Membership.LEAVE, + Membership.BAN, + ) + + # People shouldn't see past their leave/ban event + if membership in ( + Membership.LEAVE, + Membership.BAN, + ): + to_bound = ( + room_membership_for_user_at_to_token.event_pos.to_room_stream_token() + ) + # If we are participating in the room, we can get the latest current state in + # the room + elif membership == Membership.JOIN: + to_bound = to_token + # We can only rely on the stripped state included in the invite/knock event + # itself so there will never be any state deltas to send down. + elif membership in (Membership.INVITE, Membership.KNOCK): + return [] + else: + # We don't know how to handle this type of membership yet + # + # FIXME: We should use `assert_never` here but for some reason + # the exhaustive matching doesn't recognize the `Never` here. 
+ # assert_never(membership) + raise AssertionError( + f"Unexpected membership {membership} that we don't know how to handle yet" + ) + + return await self.store.get_current_state_deltas_for_room( + room_id=room_id, + from_token=from_token, + to_token=to_bound, + ) + + @trace + async def get_room_sync_data( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + new_connection_state: "MutablePerConnectionState", + room_id: str, + room_sync_config: RoomSyncConfig, + room_membership_for_user_at_to_token: RoomsForUserType, + from_token: Optional[SlidingSyncStreamToken], + to_token: StreamToken, + newly_joined: bool, + newly_left: bool, + is_dm: bool, + ) -> SlidingSyncResult.RoomResult: + """ + Fetch room data for the sync response. + + We fetch data according to the token range (> `from_token` and <= `to_token`). + + Args: + sync_config: Sync configuration (which includes the user to fetch data for) + previous_connection_state: Snapshot of the per-connection state from the + previous request. + new_connection_state: A mutable copy of the per-connection state, used to + record updates during this request. + room_id: The room ID to fetch data for + room_sync_config: Config for what data we should fetch for a room in the + sync response. + room_membership_for_user_at_to_token: Membership information for the user + in the room at the time of `to_token`. + from_token: The point in the stream to sync from. + to_token: The point in the stream to sync up to. + newly_joined: If the user has newly joined the room + newly_left: If the user has newly left the room + is_dm: Whether the room is a DM room + """ + user = sync_config.user + + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "membership", + room_membership_for_user_at_to_token.membership, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "timeline_limit", + room_sync_config.timeline_limit, + ) + + # Handle state resets. For example, if we see + # `room_membership_for_user_at_to_token.event_id=None and + # room_membership_for_user_at_to_token.membership is not None`, we should + # indicate to the client that a state reset happened. Perhaps we should indicate + # this by setting `initial: True` and empty `required_state: []`. + state_reset_out_of_room = False + if ( + room_membership_for_user_at_to_token.event_id is None + and room_membership_for_user_at_to_token.membership is not None + ): + # We only expect the `event_id` to be `None` if you've been state reset out + # of the room (meaning you're no longer in the room). We could put this as + # part of the if-statement above but we want to handle every case where + # `event_id` is `None`. + assert room_membership_for_user_at_to_token.membership is Membership.LEAVE + + state_reset_out_of_room = True + + prev_room_sync_config = previous_connection_state.room_configs.get(room_id) + + # Determine whether we should limit the timeline to the token range. + # + # We should return historical messages (before token range) in the + # following cases because we want clients to be able to show a basic + # screen of information: + # + # - Initial sync (because no `from_token` to limit us anyway) + # - When users `newly_joined` + # - For an incremental sync where we haven't sent it down this + # connection before + # + # Relevant spec issue: + # https://github.com/matrix-org/matrix-spec/issues/1917 + # + # XXX: Odd behavior - We also check if the `timeline_limit` has increased; if so, + # we ignore the from bound for the timeline to send down a larger chunk of + # history and set `unstable_expanded_timeline` to true. 
This is only being added + # to match the behavior of the Sliding Sync proxy as we expect the ElementX + # client to feel a certain way and be able to trickle in a full page of timeline + # messages to fill up the screen. This is a bit different to the behavior of the + # Sliding Sync proxy (which sets initial=true, but then doesn't send down the + # full state again), but existing apps, e.g. ElementX, just need `limited` set. + # We don't explicitly set `limited` but this will be the case for any room that + # has more history than we're trying to pull out. Using + # `unstable_expanded_timeline` allows us to avoid contaminating what `initial` + # or `limited` mean for clients that interpret them correctly. In future this + # behavior is almost certainly going to change. + # + from_bound = None + initial = True + ignore_timeline_bound = False + if from_token and not newly_joined and not state_reset_out_of_room: + room_status = previous_connection_state.rooms.have_sent_room(room_id) + if room_status.status == HaveSentRoomFlag.LIVE: + from_bound = from_token.stream_token.room_key + initial = False + elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: + assert room_status.last_token is not None + from_bound = room_status.last_token + initial = False + elif room_status.status == HaveSentRoomFlag.NEVER: + from_bound = None + initial = True + else: + assert_never(room_status.status) + + log_kv({"sliding_sync.room_status": room_status}) + + if prev_room_sync_config is not None: + # Check if the timeline limit has increased; if so, ignore the + # timeline bound and record the change (see "XXX: Odd behavior" + # above). + if ( + prev_room_sync_config.timeline_limit + < room_sync_config.timeline_limit + ): + ignore_timeline_bound = True + + log_kv( + { + "sliding_sync.from_bound": from_bound, + "sliding_sync.initial": initial, + "sliding_sync.ignore_timeline_bound": ignore_timeline_bound, + } + ) + + # Assemble the list of timeline events + # + # FIXME: It would be nice to make the `rooms` response more uniform regardless of + # membership. Currently, we have to make all of these optional because + # `invite`/`knock` rooms only have `stripped_state`. See + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 + timeline_events: List[EventBase] = [] + bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None + limited: Optional[bool] = None + prev_batch_token: Optional[StreamToken] = None + num_live: Optional[int] = None + if ( + room_sync_config.timeline_limit > 0 + # No timeline for invite/knock rooms (just `stripped_state`) + and room_membership_for_user_at_to_token.membership + not in (Membership.INVITE, Membership.KNOCK) + ): + limited = False + # We want to start off using the `to_token` (vs `from_token`) because we look + # backwards from the `to_token` up to the `timeline_limit` and we might not + # reach the `from_token` before we hit the limit. We will update the room stream + # position once we've fetched the events to point to the earliest event fetched. 
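A minimal, self-contained sketch of the backwards pagination performed in the chunk that follows, assuming plain integer stream positions (all names here are illustrative; the real pagination methods operate on room stream tokens and database rows):

    from typing import List, NamedTuple, Optional, Tuple

    class Event(NamedTuple):
        stream: int  # simplified integer stream position
        body: str

    def paginate_backwards(
        events: List[Event], from_key: int, to_key: Optional[int], limit: int
    ) -> Tuple[List[Event], int, bool]:
        # Walk newest-to-oldest from `from_key` down to (but excluding) `to_key`.
        in_range = [
            e
            for e in sorted(events, key=lambda e: e.stream, reverse=True)
            if e.stream <= from_key and (to_key is None or e.stream > to_key)
        ]
        page = in_range[:limit]
        limited = len(in_range) > limit
        # The next backwards page starts just below the oldest event returned.
        new_key = page[-1].stream - 1 if page else from_key
        return page, new_key, limited

    events = [Event(s, f"e{s}") for s in range(1, 8)]
    page, new_key, limited = paginate_backwards(events, from_key=7, to_key=None, limit=3)
    assert [e.stream for e in page] == [7, 6, 5] and limited
    page.reverse()  # as in the handler: return events oldest-to-newest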
+ prev_batch_token = to_token + + # We're going to paginate backwards from the `to_token` + to_bound = to_token.room_key + # People shouldn't see past their leave/ban event + if room_membership_for_user_at_to_token.membership in ( + Membership.LEAVE, + Membership.BAN, + ): + to_bound = room_membership_for_user_at_to_token.event_pos.to_room_stream_token() + + timeline_from_bound = from_bound + if ignore_timeline_bound: + timeline_from_bound = None + + # For initial `/sync` (and other historical scenarios mentioned above), we + # want to view a historical section of the timeline, so we fetch events by + # `topological_ordering` (the best representation of the room DAG as others were + # seeing it at the time). This also aligns with the order that `/messages` + # returns events in. + # + # For incremental `/sync`, we want to get all updates for rooms since + # the last `/sync` (regardless if those updates arrived late or happened + # a while ago in the past), so we fetch events by `stream_ordering` (in the + # order they were received by the server). + # + # Relevant spec issue: https://github.com/matrix-org/matrix-spec/issues/1917 + # + # FIXME: Using workaround for mypy, + # https://github.com/python/mypy/issues/10740#issuecomment-1997047277 and + # https://github.com/python/mypy/issues/17479 + paginate_room_events_by_topological_ordering: PaginateFunction = ( + self.store.paginate_room_events_by_topological_ordering + ) + paginate_room_events_by_stream_ordering: PaginateFunction = ( + self.store.paginate_room_events_by_stream_ordering + ) + pagination_method: PaginateFunction = ( + # Use `topological_ordering` for historical events + paginate_room_events_by_topological_ordering + if timeline_from_bound is None + # Use `stream_ordering` for updates + else paginate_room_events_by_stream_ordering + ) + timeline_events, new_room_key, limited = await pagination_method( + room_id=room_id, + # The bounds are reversed so we can paginate backwards + # (from newer to older events) starting at to_bound. + # This ensures we fill the `limit` with the newest events first. + from_key=to_bound, + to_key=timeline_from_bound, + direction=Direction.BACKWARDS, + limit=room_sync_config.timeline_limit, + ) + + # We want to return the events in ascending order (the last event is the + # most recent). + timeline_events.reverse() + + # Make sure we don't expose any events that the client shouldn't see + timeline_events = await filter_events_for_client( + self.storage_controllers, + user.to_string(), + timeline_events, + is_peeking=room_membership_for_user_at_to_token.membership + != Membership.JOIN, + filter_send_to_client=True, + ) + # TODO: Filter out `EventTypes.CallInvite` in public rooms, + # see https://github.com/element-hq/synapse/issues/17359 + + # TODO: Handle timeline gaps (`get_timeline_gaps()`) + + # Determine how many "live" events we have (events within the given token range). + # + # This is mostly useful to determine whether a given @mention event should + # make a noise or not. 
Clients cannot rely solely on the absence of + # `initial: true` to determine live events because if a room not in the + # sliding window bumps into the window because of an @mention it will have + # `initial: true` yet contain a single live event (with potentially other + # old events in the timeline). + num_live = 0 + if from_token is not None: + for timeline_event in reversed(timeline_events): + # These fields should be present for all persisted events + assert timeline_event.internal_metadata.stream_ordering is not None + assert timeline_event.internal_metadata.instance_name is not None + + persisted_position = PersistedEventPosition( + instance_name=timeline_event.internal_metadata.instance_name, + stream=timeline_event.internal_metadata.stream_ordering, + ) + if persisted_position.persisted_after( + from_token.stream_token.room_key + ): + num_live += 1 + else: + # Since we're iterating over the timeline events in + # reverse-chronological order, we can break once we hit an event + # that's not live. In the future, we could potentially optimize + # this more with a binary search (bisect). + break + + # If the timeline is `limited=True`, the client does not have all events + # necessary to calculate aggregations themselves. + if limited: + bundled_aggregations = ( + await self.relations_handler.get_bundled_aggregations( + timeline_events, user.to_string() + ) + ) + + # Update the `prev_batch_token` to point to the position that allows us to + # keep paginating backwards from the oldest event we return in the timeline. + prev_batch_token = prev_batch_token.copy_and_replace( + StreamKeyType.ROOM, new_room_key + ) + + # Figure out any stripped state events for invite/knocks. This allows the + # potential joiner to identify the room. + stripped_state: List[JsonDict] = [] + if room_membership_for_user_at_to_token.membership in ( + Membership.INVITE, + Membership.KNOCK, + ): + # This should never happen. If someone is invited to or has knocked on a + # room, then there should be an event for it. + assert room_membership_for_user_at_to_token.event_id is not None + + invite_or_knock_event = await self.store.get_event( + room_membership_for_user_at_to_token.event_id + ) + + stripped_state = [] + if invite_or_knock_event.membership == Membership.INVITE: + invite_state = invite_or_knock_event.unsigned.get( + "invite_room_state", [] + ) + if not isinstance(invite_state, list): + invite_state = [] + + stripped_state.extend(invite_state) + elif invite_or_knock_event.membership == Membership.KNOCK: + knock_state = invite_or_knock_event.unsigned.get("knock_room_state", []) + if not isinstance(knock_state, list): + knock_state = [] + + stripped_state.extend(knock_state) + + stripped_state.append(strip_event(invite_or_knock_event)) + + # Get the changes to current state in the token range from the + # `current_state_delta_stream` table. + # + # For incremental syncs, we can do this first to determine if something relevant + # has changed and strategically avoid fetching other costly things. 
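A toy model of the delta bookkeeping performed in the chunk that follows, with a plain tuple standing in for `StateDelta` rows and string literals standing in for the `EventTypes` constants (the real code also tracks avatar changes):

    from typing import Dict, List, NamedTuple, Optional, Tuple

    class Delta(NamedTuple):
        event_type: str
        state_key: str
        event_id: Optional[str]  # None on a state reset

    def fold_deltas(deltas: List[Delta]) -> Tuple[Dict[Tuple[str, str], str], bool, bool]:
        # Fold deltas into a (type, state_key) -> event_id map and note what changed.
        delta_id_map: Dict[Tuple[str, str], str] = {}
        membership_changed = False
        name_changed = False
        for delta in deltas:
            if delta.event_id is not None:
                delta_id_map[(delta.event_type, delta.state_key)] = delta.event_id
            if delta.event_type == "m.room.member":
                membership_changed = True
            elif delta.event_type == "m.room.name" and delta.state_key == "":
                name_changed = True
        return delta_id_map, membership_changed, name_changed

    deltas = [
        Delta("m.room.member", "@alice:example.com", "$join"),
        Delta("m.room.name", "", "$name"),
    ]
    delta_id_map, membership_changed, name_changed = fold_deltas(deltas)
    assert membership_changed and name_changed and len(delta_id_map) == 2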
+ room_state_delta_id_map: MutableStateMap[str] = {} + name_event_id: Optional[str] = None + membership_changed = False + name_changed = False + avatar_changed = False + if initial: + # Check whether the room has a name set + name_state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=StateFilter.from_types([(EventTypes.Name, "")]), + to_token=to_token, + ) + name_event_id = name_state_ids.get((EventTypes.Name, "")) + else: + assert from_bound is not None + + # TODO: Limit the number of state events we're about to send down + # for the room; if it's too many, we should change this to an + # `initial=True`? + + # For the case of rejecting remote invites, the leave event won't be + # returned by `get_current_state_deltas_for_room`. This is due to the current + # state only being filled out for rooms the server is in, and so doesn't pick + # up out-of-band leaves (including locally rejected invites) as these events + # are outliers and not added to the `current_state_delta_stream`. + # + # We rely on being explicitly told that the room has been `newly_left` to + # ensure we extract the out-of-band leave. + if newly_left and room_membership_for_user_at_to_token.event_id is not None: + membership_changed = True + leave_event = await self.store.get_event( + room_membership_for_user_at_to_token.event_id + ) + state_key = leave_event.get_state_key() + if state_key is not None: + room_state_delta_id_map[(leave_event.type, state_key)] = ( + room_membership_for_user_at_to_token.event_id + ) + + deltas = await self.get_current_state_deltas_for_room( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + from_token=from_bound, + to_token=to_token.room_key, + ) + for delta in deltas: + # TODO: Handle state resets where event_id is None + if delta.event_id is not None: + room_state_delta_id_map[(delta.event_type, delta.state_key)] = ( + delta.event_id + ) + + if delta.event_type == EventTypes.Member: + membership_changed = True + elif delta.event_type == EventTypes.Name and delta.state_key == "": + name_changed = True + elif ( + delta.event_type == EventTypes.RoomAvatar and delta.state_key == "" + ): + avatar_changed = True + + # We only need the room summary for calculating heroes; however, if we do + # fetch it then we can use it to calculate `joined_count` and + # `invited_count`. + room_membership_summary: Optional[Mapping[str, MemberSummary]] = None + + # `heroes` are required if the room name is not set. + # + # Note: When you're the first one on your server to be invited to a new room + # over federation, we only have access to some stripped state in + # `event.unsigned.invite_room_state` which currently doesn't include `heroes`, + # see https://github.com/matrix-org/matrix-spec/issues/380. This means that + # clients won't be able to calculate the room name when necessary; it's just a + # pitfall we have to deal with until that spec issue is resolved. + hero_user_ids: List[str] = [] + # TODO: Should we also check for `EventTypes.CanonicalAlias` + # (`m.room.canonical_alias`) as a fallback for the room name? see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 + # + # We need to fetch the `heroes` if the room name is not set. But we only need to + # get them on initial syncs (or the first time we send down the room) or if the + # membership has changed, which may change the heroes. 
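For reference, a much-simplified sketch of hero selection. The real `extract_heroes_from_room_summary` (used just below) works over `MemberSummary` objects and has extra fallbacks; this toy version only captures the basic idea of preferring joined and invited members other than ourselves:

    from typing import Dict, List

    def pick_heroes(summary: Dict[str, List[str]], me: str, limit: int = 5) -> List[str]:
        # `summary` maps a membership ("join"/"invite"/...) to user IDs.
        heroes: List[str] = []
        for membership in ("join", "invite"):
            for user_id in summary.get(membership, []):
                if user_id != me and user_id not in heroes:
                    heroes.append(user_id)
                if len(heroes) == limit:
                    return heroes
        return heroes

    summary = {"join": ["@me:example.com", "@a:example.com"], "invite": ["@b:example.com"]}
    assert pick_heroes(summary, me="@me:example.com") == ["@a:example.com", "@b:example.com"]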
+ if name_event_id is None and (initial or (not initial and membership_changed)): + # We need the room summary to extract the heroes from + if room_membership_for_user_at_to_token.membership != Membership.JOIN: + # TODO: Figure out how to get the membership summary for left/banned rooms + # For invite/knock rooms we don't include the information. + room_membership_summary = {} + else: + room_membership_summary = await self.store.get_room_summary(room_id) + # TODO: Reverse/rewind back to the `to_token` + + hero_user_ids = extract_heroes_from_room_summary( + room_membership_summary, me=user.to_string() + ) + + # Fetch the membership counts for rooms we're joined to. + # + # Similarly to other metadata, we only need to calculate the member + # counts if this is an initial sync or the memberships have changed. + joined_count: Optional[int] = None + invited_count: Optional[int] = None + if ( + initial or membership_changed + ) and room_membership_for_user_at_to_token.membership == Membership.JOIN: + # If we have the room summary (because we calculated heroes above) + # then we can simply pull the counts from there. + if room_membership_summary is not None: + empty_membership_summary = MemberSummary([], 0) + + joined_count = room_membership_summary.get( + Membership.JOIN, empty_membership_summary + ).count + + invited_count = room_membership_summary.get( + Membership.INVITE, empty_membership_summary + ).count + else: + member_counts = await self.store.get_member_counts(room_id) + joined_count = member_counts.get(Membership.JOIN, 0) + invited_count = member_counts.get(Membership.INVITE, 0) + + # Fetch the `required_state` for the room + # + # No `required_state` for invite/knock rooms (just `stripped_state`) + # + # FIXME: It would be nice to make the `rooms` response more uniform regardless + # of membership. Currently, we have to make this optional because + # `invite`/`knock` rooms only have `stripped_state`. See + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 + # + # Calculate the `StateFilter` based on the `required_state` for the room + required_state_filter = StateFilter.none() + # The requested `required_state_map` with the lazy membership expanded and + # `$ME` replaced with the user's ID. This allows us to see what membership we've + # sent down to the client in the next request. + # + # Make a copy so we can modify it. We still need to be careful to make a copy of + # the state key sets if we want to add/remove from them. We could make a deep + # copy but this saves us some work. + expanded_required_state_map = dict(room_sync_config.required_state_map) + if room_membership_for_user_at_to_token.membership not in ( + Membership.INVITE, + Membership.KNOCK, + ): + # If we have a double wildcard ("*", "*") in the `required_state`, we need + # to fetch all state for the room + # + # Note: MSC3575 describes different behavior to how we're handling things + # here but since it's not wrong to return more state than requested + # (`required_state` is just the minimum requested), it doesn't matter if we + # include more than the client wanted. This complexity is also under scrutiny, + # see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1185109050 + # + # > One unique exception is when you request all state events via ["*", "*"]. When used, + # > all state events are returned by default, and additional entries FILTER OUT the returned set + # > of state events. These additional entries cannot use '*' themselves. 
+ # > For example, ["*", "*"], ["m.room.member", "@alice:example.com"] will _exclude_ every m.room.member + # > event _except_ for @alice:example.com, and include every other state event. + # > In addition, ["*", "*"], ["m.space.child", "*"] is an error, the m.space.child filter is not + # > required as it would have been returned anyway. + # > + # > -- MSC3575 (https://github.com/matrix-org/matrix-spec-proposals/pull/3575) + if StateValues.WILDCARD in room_sync_config.required_state_map.get( + StateValues.WILDCARD, set() + ): + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard", + True, + ) + required_state_filter = StateFilter.all() + # TODO: `StateFilter` currently doesn't support wildcard event types. We're + # currently working around this by returning all state to the client but it + # would be nice to fetch less from the database and return just what the + # client wanted. + elif ( + room_sync_config.required_state_map.get(StateValues.WILDCARD) + is not None + ): + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_wildcard_event_type", + True, + ) + required_state_filter = StateFilter.all() + else: + required_state_types: List[Tuple[str, Optional[str]]] = [] + num_wild_state_keys = 0 + lazy_load_room_members = False + num_others = 0 + for ( + state_type, + state_key_set, + ) in room_sync_config.required_state_map.items(): + for state_key in state_key_set: + if state_key == StateValues.WILDCARD: + num_wild_state_keys += 1 + # `None` is a wildcard in the `StateFilter` + required_state_types.append((state_type, None)) + # We need to fetch all relevant people when we're lazy-loading membership + elif ( + state_type == EventTypes.Member + and state_key == StateValues.LAZY + ): + lazy_load_room_members = True + + # Everyone in the timeline is relevant + timeline_membership: Set[str] = set() + if timeline_events is not None: + for timeline_event in timeline_events: + # Anyone who sent a message is relevant + timeline_membership.add(timeline_event.sender) + + # We also care about invite, ban, and kick targets, + # etc. + if timeline_event.type == EventTypes.Member: + timeline_membership.add( + timeline_event.state_key + ) + + # Update the required state filter so we pick up the new + # membership + for user_id in timeline_membership: + required_state_types.append( + (EventTypes.Member, user_id) + ) + + # Add an explicit entry for each user in the timeline + # + # Make a new set or copy of the state key set so we can + # modify it without affecting the original + # `required_state_map` + expanded_required_state_map[EventTypes.Member] = ( + expanded_required_state_map.get( + EventTypes.Member, set() + ) + | timeline_membership + ) + elif state_key == StateValues.ME: + num_others += 1 + required_state_types.append((state_type, user.to_string())) + # Replace `$ME` with the user's ID so we can deduplicate + # when someone requests the same state with `$ME` or with + # their user ID. 
+ # + # Make a new set or copy of the state key set so we can + # modify it without affecting the original + # `required_state_map` + expanded_required_state_map[EventTypes.Member] = ( + expanded_required_state_map.get( + EventTypes.Member, set() + ) + | {user.to_string()} + ) + else: + num_others += 1 + required_state_types.append((state_type, state_key)) + + set_tag( + SynapseTags.FUNC_ARG_PREFIX + + "required_state_wildcard_state_key_count", + num_wild_state_keys, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_lazy", + lazy_load_room_members, + ) + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "required_state_other_count", + num_others, + ) + + required_state_filter = StateFilter.from_types(required_state_types) + + # We need this base set of info for the response so let's just fetch it along + # with the `required_state` for the room + hero_room_state = [ + (EventTypes.Member, hero_user_id) for hero_user_id in hero_user_ids + ] + meta_room_state = list(hero_room_state) + if initial or name_changed: + meta_room_state.append((EventTypes.Name, "")) + if initial or avatar_changed: + meta_room_state.append((EventTypes.RoomAvatar, "")) + + state_filter = StateFilter.all() + if required_state_filter != StateFilter.all(): + state_filter = StateFilter( + types=StateFilter.from_types( + chain(meta_room_state, required_state_filter.to_types()) + ).types, + include_others=required_state_filter.include_others, + ) + + # The required state map to store in the room sync config, if it has + # changed. + changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]] = None + + # We can return all of the state that was requested if this was the first + # time we've sent the room down this connection. + room_state: StateMap[EventBase] = {} + if initial: + room_state = await self.get_current_state_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=state_filter, + to_token=to_token, + ) + else: + assert from_bound is not None + + if prev_room_sync_config is not None: + # Check if there are any changes to the required state config + # that we need to handle. + changed_required_state_map, added_state_filter = ( + _required_state_changes( + user.to_string(), + prev_required_state_map=prev_room_sync_config.required_state_map, + request_required_state_map=expanded_required_state_map, + state_deltas=room_state_delta_id_map, + ) + ) + + if added_state_filter: + # Some state entries got added, so we pull out the current + # state for them. If we don't do this we'd only send down new deltas. 
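Why newly added required-state entries are pulled from current state rather than from deltas can be shown with plain sets; the real code (just below, and in `_required_state_changes` further down) expresses the same idea via `StateFilter`s, so everything here is an illustrative simplification:

    from typing import AbstractSet, Mapping, Set, Tuple

    StateKey = Tuple[str, str]  # (event type, state key)

    def newly_added_entries(
        prev: Mapping[str, AbstractSet[str]],
        request: Mapping[str, AbstractSet[str]],
    ) -> Set[StateKey]:
        # Entries in the request that the previous config did not cover. These
        # must be served from *current* state: deltas alone would miss anything
        # that last changed before the connection started asking for it.
        added: Set[StateKey] = set()
        for event_type, state_keys in request.items():
            for state_key in set(state_keys) - set(prev.get(event_type, set())):
                added.add((event_type, state_key))
        return added

    prev = {"m.room.member": {"@a:example.com"}}
    request = {"m.room.member": {"@a:example.com", "@b:example.com"}, "m.room.topic": {""}}
    assert newly_added_entries(prev, request) == {
        ("m.room.member", "@b:example.com"),
        ("m.room.topic", ""),
    }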
+ state_ids = await self.get_current_state_ids_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=added_state_filter, + to_token=to_token, + ) + room_state_delta_id_map.update(state_ids) + + events = await self.store.get_events( + state_filter.filter_state(room_state_delta_id_map).values() + ) + room_state = {(s.type, s.state_key): s for s in events.values()} + + # If the membership changed and we have to get heroes, get the remaining + # heroes from the state + if hero_user_ids: + hero_membership_state = await self.get_current_state_at( + room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, + state_filter=StateFilter.from_types(hero_room_state), + to_token=to_token, + ) + room_state.update(hero_membership_state) + + required_room_state: StateMap[EventBase] = {} + if required_state_filter != StateFilter.none(): + required_room_state = required_state_filter.filter_state(room_state) + + # Find the room name and avatar from the state + room_name: Optional[str] = None + # TODO: Should we also check for `EventTypes.CanonicalAlias` + # (`m.room.canonical_alias`) as a fallback for the room name? see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1671260153 + name_event = room_state.get((EventTypes.Name, "")) + if name_event is not None: + room_name = name_event.content.get("name") + + room_avatar: Optional[str] = None + avatar_event = room_state.get((EventTypes.RoomAvatar, "")) + if avatar_event is not None: + room_avatar = avatar_event.content.get("url") + + # Assemble heroes: extract the info from the state we just fetched + heroes: List[SlidingSyncResult.RoomResult.StrippedHero] = [] + for hero_user_id in hero_user_ids: + member_event = room_state.get((EventTypes.Member, hero_user_id)) + if member_event is not None: + heroes.append( + SlidingSyncResult.RoomResult.StrippedHero( + user_id=hero_user_id, + display_name=member_event.content.get("displayname"), + avatar_url=member_event.content.get("avatar_url"), + ) + ) + + # Figure out the last bump event in the room. If the bump stamp hasn't + # changed we omit it from the response. 
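The first step of `_get_bump_stamp` (defined further below) scans the fetched timeline, newest first, for a bump event with a positive position. A standalone sketch with integer stream orderings; the bump-type set here is a made-up subset for illustration:

    from typing import List, NamedTuple, Optional

    BUMP_EVENT_TYPES = {"m.room.message", "m.room.encrypted"}  # illustrative subset

    class TimelineEvent(NamedTuple):
        type: str
        stream_ordering: int

    def bump_stamp_from_timeline(timeline: List[TimelineEvent]) -> Optional[int]:
        # Scan newest-first; skip backfilled events (negative stream orderings),
        # since they can't sensibly be compared against live positions.
        for event in reversed(timeline):  # timeline is oldest-to-newest
            if event.type in BUMP_EVENT_TYPES and event.stream_ordering > 0:
                return event.stream_ordering
        return None

    timeline = [
        TimelineEvent("m.room.message", -3),  # backfilled
        TimelineEvent("m.room.member", 10),
        TimelineEvent("m.room.message", 12),
    ]
    assert bump_stamp_from_timeline(timeline) == 12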
+ bump_stamp = None + + always_return_bump_stamp = ( + # We use the membership event position for any non-join + room_membership_for_user_at_to_token.membership != Membership.JOIN + # We didn't fetch any timeline events but we should still check for + # a bump_stamp that might be somewhere + or limited is None + # There might be a bump event somewhere before the timeline events + # that we fetched, that we didn't previously send down + or limited is True + # Always give the client some frame of reference if this is the + # first time they are seeing the room down the connection + or initial + ) + + # If we're joined to the room, we need to find the last bump event before the + # `to_token` + if room_membership_for_user_at_to_token.membership == Membership.JOIN: + # Try to get a bump stamp + new_bump_stamp = await self._get_bump_stamp( + room_id, + to_token, + timeline_events, + check_outside_timeline=always_return_bump_stamp, + ) + if new_bump_stamp is not None: + bump_stamp = new_bump_stamp + + if bump_stamp is None and always_return_bump_stamp: + # By default, just choose the membership event position for any non-join membership + bump_stamp = room_membership_for_user_at_to_token.event_pos.stream + + if bump_stamp is not None and bump_stamp < 0: + # We never want to send down negative stream orderings, as you can't + # sensibly compare positive and negative stream orderings (they have + # different meanings). + # + # A negative bump stamp here can only happen if the stream ordering + # of the membership event is negative (and there are no further bump + # stamps), which can happen if the server leaves and deletes a room, + # and then rejoins it. + # + # To deal with this, we just set the bump stamp to zero, which will + # shove this room to the bottom of the list. This is OK as the + # moment a new message happens in the room it will get put into a + # sensible order again. + bump_stamp = 0 + + room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = ( + expanded_required_state_map + ) + if changed_required_state_map: + room_sync_required_state_map_to_persist = changed_required_state_map + + # Record the `room_sync_config` if `ignore_timeline_bound` is set (which means + # that the `timeline_limit` has increased) + unstable_expanded_timeline = False + if ignore_timeline_bound: + # FIXME: We signal the fact that we're sending down more events to + # the client by setting `unstable_expanded_timeline` to true (see + # "XXX: Odd behavior" above). + unstable_expanded_timeline = True + + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_required_state_map_to_persist, + ) + elif prev_room_sync_config is not None: + # If the result is `limited` then we need to record that the + # `timeline_limit` has been reduced, as when/if the client later requests + # more timeline then we have more data to send. + # + # Otherwise (when not `limited`) we don't need to record that the + # `timeline_limit` has been reduced, as the *effective* `timeline_limit` + # (i.e. the amount of timeline we have previously sent to the client) is at + # least the previous `timeline_limit`. + # + # This is to handle the case where the `timeline_limit` e.g. goes from 10 to + # 5 to 10 again (without any timeline gaps), where there's no point sending + # down the initial historical chunk events when the `timeline_limit` is + # increased as the client already has the 10 previous events. 
However, if the + # client has a gap in the timeline (i.e. `limited` is True), then we *do* + # need to record the reduced timeline. + # + # TODO: Handle timeline gaps (`get_timeline_gaps()`) - This is separate from + # the client-side gaps caused by a `limited` response that we're + # talking about above. + if ( + limited + and prev_room_sync_config.timeline_limit + > room_sync_config.timeline_limit + ): + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_required_state_map_to_persist, + ) + + elif changed_required_state_map is not None: + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_required_state_map_to_persist, + ) + + else: + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_required_state_map_to_persist, + ) + + set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) + + return SlidingSyncResult.RoomResult( + name=room_name, + avatar=room_avatar, + heroes=heroes, + is_dm=is_dm, + initial=initial, + required_state=list(required_room_state.values()), + timeline_events=timeline_events, + bundled_aggregations=bundled_aggregations, + stripped_state=stripped_state, + prev_batch=prev_batch_token, + limited=limited, + unstable_expanded_timeline=unstable_expanded_timeline, + num_live=num_live, + bump_stamp=bump_stamp, + joined_count=joined_count, + invited_count=invited_count, + # TODO: These are just dummy values. We could potentially just remove these + # since notifications can only really be done correctly on the client anyway + # (encrypted rooms). + notification_count=0, + highlight_count=0, + ) + + @trace + async def _get_bump_stamp( + self, + room_id: str, + to_token: StreamToken, + timeline: List[EventBase], + check_outside_timeline: bool, + ) -> Optional[int]: + """Get a bump stamp for the room, if we have a bump event and it has + changed. + + Args: + room_id + to_token: The upper bound of the token range to consider + timeline: The list of events we have fetched. + check_outside_timeline: Whether we need to check for a bump stamp among + events before the timeline if we didn't find a bump stamp in + the timeline events. + """ + + # First check the timeline events we're returning to see if one of + # those matches. We iterate backwards and take the stream ordering + # of the first event that matches the bump event types. + for timeline_event in reversed(timeline): + if timeline_event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: + new_bump_stamp = timeline_event.internal_metadata.stream_ordering + + # All persisted events have a stream ordering + assert new_bump_stamp is not None + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_stamp > 0: + return new_bump_stamp + + if not check_outside_timeline: + # If we don't need to check outside the timeline, then we know the bump + # stamp can't have changed. + return None + + # We can quickly query for the latest bump event in the room using the + # sliding sync tables. 
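The branching in the chunk that follows reads naturally as a three-way decision over the stored bump stamp. A pure-function sketch with integer positions; the `Outcome` enum and all names here are hypothetical:

    from enum import Enum, auto
    from typing import Optional, Tuple

    class Outcome(Enum):
        MEMBERSHIP_FALLBACK = auto()  # no usable bump event; caller falls back
        USE_STORED = auto()           # stored value is safely before our token
        SCAN_BEFORE_TOKEN = auto()    # stored value may be ahead; query the DB

    def classify_bump_stamp(
        stored: Optional[int], min_token_pos: int, tables_ready: bool
    ) -> Tuple[Outcome, Optional[int]]:
        if stored is None and tables_ready:
            return Outcome.MEMBERSHIP_FALLBACK, None
        if stored is not None and stored < min_token_pos:
            if stored > 0:
                return Outcome.USE_STORED, stored
            # Negative positions come from backfill and aren't comparable.
            return Outcome.MEMBERSHIP_FALLBACK, None
        return Outcome.SCAN_BEFORE_TOKEN, None

    assert classify_bump_stamp(42, min_token_pos=100, tables_ready=True) == (Outcome.USE_STORED, 42)
    assert classify_bump_stamp(None, min_token_pos=100, tables_ready=True)[0] is Outcome.MEMBERSHIP_FALLBACK
    assert classify_bump_stamp(150, min_token_pos=100, tables_ready=True)[0] is Outcome.SCAN_BEFORE_TOKEN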
+ latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( + room_id + ) + + min_to_token_position = to_token.room_key.stream + + # If we can rely on the new sliding sync tables and the `bump_stamp` is + # `None`, just fall back to the membership event position. This can happen + # when we've just joined a remote room and all the events are backfilled. + if ( + # FIXME: The background job check can be removed once we bump + # `SCHEMA_COMPAT_VERSION` and run the foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` + # (tracked by https://github.com/element-hq/synapse/issues/17623) + latest_room_bump_stamp is None + and await self.store.have_finished_sliding_sync_background_jobs() + ): + return None + + # The `bump_stamp` stored in the database might be ahead of our token. Since + # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure + # that it's before the `to_token` in all scenarios. The only scenario we can be + # sure of is if the `bump_stamp` is strictly before the minimum position from + # the token. + # + # We don't need to check if the background update has finished, as if the + # returned bump stamp is not None then it must be up to date. + elif ( + latest_room_bump_stamp is not None + and latest_room_bump_stamp < min_to_token_position + ): + if latest_room_bump_stamp > 0: + return latest_room_bump_stamp + else: + return None + + # Otherwise, if it's within or after the `to_token`, we need to find the + # last bump event before the `to_token`. + else: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + return new_bump_event_pos.stream + + return None + + +def _required_state_changes( + user_id: str, + *, + prev_required_state_map: Mapping[str, AbstractSet[str]], + request_required_state_map: Mapping[str, AbstractSet[str]], + state_deltas: StateMap[str], +) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]: + """Calculates the changes between the required state room config from the + previous requests compared with the current request. + + This does two things. First, it calculates if we need to update the room + config due to changes to required state. Secondly, it works out which state + entries we need to pull from current state and return due to the state entry + now appearing in the required state when it previously wasn't (on top of the + state deltas). + + This function tries to handle the case where a state entry is + added, removed, and then added again to the required state. In that case we + only want to re-send that entry down sync if it has changed. + + Returns: + A 2-tuple of updated required state config (or None if there is no update) + and the state filter to use to fetch extra current state that we need to + return. + """ + if prev_required_state_map == request_required_state_map: + # There has been no change. Return immediately. 
+ return None, StateFilter.none() + + prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set()) + request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set()) + + # If we were previously fetching everything ("*", "*"), always update the effective + # room required state config to match the request. And since we were previously + # already fetching everything, we don't have to fetch anything now that they've + # narrowed. + if StateValues.WILDCARD in prev_wildcard: + return request_required_state_map, StateFilter.none() + + # If an event type wildcard has been added or removed, we don't try to do + # anything fancy, and instead always update the effective room required + # state config to match the request. + if request_wildcard - prev_wildcard: + # Some keys were added, so we need to fetch everything + return request_required_state_map, StateFilter.all() + if prev_wildcard - request_wildcard: + # Keys were only removed, so we don't have to fetch everything. + return request_required_state_map, StateFilter.none() + + # Contains updates to the required state map compared with the previous room + # config. This has the same format as `RoomSyncConfig.required_state` + changes: Dict[str, AbstractSet[str]] = {} + + # The set of types/state keys that we need to fetch and return to the + # client. Passed to `StateFilter.from_types(...)` + added: List[Tuple[str, Optional[str]]] = [] + + # Convert the list of state deltas to a map from type to state_keys that have + # changed. + changed_types_to_state_keys: Dict[str, Set[str]] = {} + for event_type, state_key in state_deltas: + changed_types_to_state_keys.setdefault(event_type, set()).add(state_key) + + # First we calculate what, if anything, has been *added*. + for event_type in ( + prev_required_state_map.keys() | request_required_state_map.keys() + ): + old_state_keys = prev_required_state_map.get(event_type, set()) + request_state_keys = request_required_state_map.get(event_type, set()) + changed_state_keys = changed_types_to_state_keys.get(event_type, set()) + + if old_state_keys == request_state_keys: + # No change to this type + continue + + if not request_state_keys - old_state_keys: + # Nothing *added*, so we skip. Removals happen below. + continue + + # We only remove state keys from the effective state if they've been + # removed from the request *and* the state has changed. This ensures + # that if a client removes and then re-adds a state key, we only send + # down the associated current state event if it's changed (rather than + # sending down the same event twice). + invalidated_state_keys = ( + old_state_keys - request_state_keys + ) & changed_state_keys + + # Figure out which state keys we should remember sending down the connection + inheritable_previous_state_keys = ( + # Retain the previous state_keys that we've sent down before. + # Wildcard and lazy state keys are not sticky from previous requests. + (old_state_keys - {StateValues.WILDCARD, StateValues.LAZY}) + - invalidated_state_keys + ) + + # Always update changes to include the newly added keys (we've expanded the set + # of state keys); use the new requested set with whatever hasn't been + # invalidated from the previous set. + changes[event_type] = request_state_keys | inheritable_previous_state_keys + # Limit the number of state_keys we should remember sending down the connection + # for each (room_id, user_id). We don't want to store and pull out too much data + # in the database. 
This is a happy medium between remembering nothing and + # everything. We can avoid sending redundant state down the connection most of + # the time given that most rooms don't have 100 members anyway and it takes a + # while to cycle through 100 members. + # + # Only remember up to (MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER) + if len(changes[event_type]) > MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER: + # Reset back to only the requested state keys + changes[event_type] = request_state_keys + + # Skip if there isn't any room to fill in the rest with previous state keys + if len(request_state_keys) < MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER: + # Fill the rest with previous state_keys. Ideally, we could sort + # these by recency but it's just a set so just pick an arbitrary + # subset (good enough). + changes[event_type] = changes[event_type] | set( + itertools.islice( + inheritable_previous_state_keys, + # Just taking the difference isn't perfect as there could be + # overlap in the keys between the requested and previous but we + # will decide to just take the easy route for now and avoid + # additional set operations to figure it out. + MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + - len(request_state_keys), + ) + ) + + if StateValues.WILDCARD in old_state_keys: + # We were previously fetching everything for this type, so we don't need to + # fetch anything new. + continue + + # Record the new state keys to fetch for this type. + if StateValues.WILDCARD in request_state_keys: + # If we have added a wildcard then we always just fetch everything. + added.append((event_type, None)) + else: + for state_key in request_state_keys - old_state_keys: + if state_key == StateValues.ME: + added.append((event_type, user_id)) + elif state_key == StateValues.LAZY: + # We handle lazy loading separately (outside this function), + # so don't need to explicitly add anything here. + # + # LAZY values should also be ignored for event types that are + # not membership. + pass + else: + added.append((event_type, state_key)) + + added_state_filter = StateFilter.from_types(added) + + # Figure out what changes we need to apply to the effective required state + # config. + for event_type, changed_state_keys in changed_types_to_state_keys.items(): + old_state_keys = prev_required_state_map.get(event_type, set()) + request_state_keys = request_required_state_map.get(event_type, set()) + + if old_state_keys == request_state_keys: + # No change. + continue + + # If we see the `user_id` as a state_key, also add "$ME" to the list of state + # that has changed to account for people requesting `required_state` with `$ME` + # or their user ID. + if user_id in changed_state_keys: + changed_state_keys.add(StateValues.ME) + + # We only remove state keys from the effective state if they've been + # removed from the request *and* the state has changed. This ensures + # that if a client removes and then re-adds a state key, we only send + # down the associated current state event if it's changed (rather than + # sending down the same event twice). + invalidated_state_keys = ( + old_state_keys - request_state_keys + ) & changed_state_keys + + # We've expanded the set of state keys; this case was already 
handled above. + if request_state_keys - old_state_keys: + continue + + old_state_key_wildcard = StateValues.WILDCARD in old_state_keys + request_state_key_wildcard = StateValues.WILDCARD in request_state_keys + + if old_state_key_wildcard != request_state_key_wildcard: + # If a state_key wildcard has been added or removed, we always update the + # effective room required state config to match the request. + changes[event_type] = request_state_keys + continue + + if event_type == EventTypes.Member: + old_state_key_lazy = StateValues.LAZY in old_state_keys + request_state_key_lazy = StateValues.LAZY in request_state_keys + + if old_state_key_lazy != request_state_key_lazy: + # If a "$LAZY" has been added or removed, we always update the effective room + # required state config to match the request. + changes[event_type] = request_state_keys + continue + + # At this point there are no wildcards and no additions to the set of + # state keys requested, only deletions. + # + # We only remove state keys from the effective state if they've been + # removed from the request *and* the state has changed. This ensures + # that if a client removes and then re-adds a state key, we only send + # down the associated current state event if it's changed (rather than + # sending down the same event twice). + if invalidated_state_keys: + changes[event_type] = old_state_keys - invalidated_state_keys + + if changes: + # Update the required state config based on the changes. + new_required_state_map = dict(prev_required_state_map) + for event_type, state_keys in changes.items(): + if state_keys: + new_required_state_map[event_type] = state_keys + else: + # Remove entries with empty state keys. + new_required_state_map.pop(event_type, None) + + return new_required_state_map, added_state_filter + else: + return None, added_state_filter diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py new file mode 100644
index 0000000000..077887ec32 --- /dev/null +++ b/synapse/handlers/sliding_sync/extensions.py
@@ -0,0 +1,879 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import itertools +import logging +from typing import ( + TYPE_CHECKING, + AbstractSet, + ChainMap, + Dict, + Mapping, + MutableMapping, + Optional, + Sequence, + Set, + cast, +) + +from typing_extensions import assert_never + +from synapse.api.constants import AccountDataTypes, EduTypes +from synapse.handlers.receipts import ReceiptEventSource +from synapse.logging.opentracing import trace +from synapse.storage.databases.main.receipts import ReceiptInRoom +from synapse.types import ( + DeviceListUpdates, + JsonMapping, + MultiWriterStreamToken, + SlidingSyncStreamToken, + StrCollection, + StreamToken, +) +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + MutablePerConnectionState, + OperationType, + PerConnectionState, + SlidingSyncConfig, + SlidingSyncResult, +) +from synapse.util.async_helpers import ( + concurrently_execute, + gather_optional_coroutines, +) + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class SlidingSyncExtensionHandler: + """Handles the extensions to sliding sync.""" + + def __init__(self, hs: "HomeServer"): + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.device_handler = hs.get_device_handler() + self.push_rules_handler = hs.get_push_rules_handler() + + @trace + async def get_extensions_response( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + new_connection_state: "MutablePerConnectionState", + actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], + actual_room_ids: Set[str], + actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult], + to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken], + ) -> SlidingSyncResult.Extensions: + """Handle extension requests. + + Args: + sync_config: Sync configuration + previous_connection_state: Snapshot of the current per-connection state + new_connection_state: A mutable copy of the per-connection + state, used to record updates to the state during this request. + actual_lists: Sliding window API. A map of list key to list results in the + Sliding Sync response. + actual_room_ids: The actual room IDs in the Sliding Sync response. + actual_room_response_map: A map of room ID to room results in the + Sliding Sync response. + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
+ """ + + if sync_config.extensions is None: + return SlidingSyncResult.Extensions() + + to_device_coro = None + if sync_config.extensions.to_device is not None: + to_device_coro = self.get_to_device_extension_response( + sync_config=sync_config, + to_device_request=sync_config.extensions.to_device, + to_token=to_token, + ) + + e2ee_coro = None + if sync_config.extensions.e2ee is not None: + e2ee_coro = self.get_e2ee_extension_response( + sync_config=sync_config, + e2ee_request=sync_config.extensions.e2ee, + to_token=to_token, + from_token=from_token, + ) + + account_data_coro = None + if sync_config.extensions.account_data is not None: + account_data_coro = self.get_account_data_extension_response( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, + actual_lists=actual_lists, + actual_room_ids=actual_room_ids, + account_data_request=sync_config.extensions.account_data, + to_token=to_token, + from_token=from_token, + ) + + receipts_coro = None + if sync_config.extensions.receipts is not None: + receipts_coro = self.get_receipts_extension_response( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + new_connection_state=new_connection_state, + actual_lists=actual_lists, + actual_room_ids=actual_room_ids, + actual_room_response_map=actual_room_response_map, + receipts_request=sync_config.extensions.receipts, + to_token=to_token, + from_token=from_token, + ) + + typing_coro = None + if sync_config.extensions.typing is not None: + typing_coro = self.get_typing_extension_response( + sync_config=sync_config, + actual_lists=actual_lists, + actual_room_ids=actual_room_ids, + actual_room_response_map=actual_room_response_map, + typing_request=sync_config.extensions.typing, + to_token=to_token, + from_token=from_token, + ) + + ( + to_device_response, + e2ee_response, + account_data_response, + receipts_response, + typing_response, + ) = await gather_optional_coroutines( + to_device_coro, + e2ee_coro, + account_data_coro, + receipts_coro, + typing_coro, + ) + + return SlidingSyncResult.Extensions( + to_device=to_device_response, + e2ee=e2ee_response, + account_data=account_data_response, + receipts=receipts_response, + typing=typing_response, + ) + + def find_relevant_room_ids_for_extension( + self, + requested_lists: Optional[StrCollection], + requested_room_ids: Optional[StrCollection], + actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], + actual_room_ids: AbstractSet[str], + ) -> Set[str]: + """ + Handle the reserved `lists`/`rooms` keys for extensions. Extensions should only + return results for rooms in the Sliding Sync response. This matches up the + requested rooms/lists with the actual lists/rooms in the Sliding Sync response. + + {"lists": []} // Do not process any lists. + {"lists": ["rooms", "dms"]} // Process only a subset of lists. + {"lists": ["*"]} // Process all lists defined in the Sliding Window API. (This is the default.) + + {"rooms": []} // Do not process any specific rooms. + {"rooms": ["!a:b", "!c:d"]} // Process only a subset of room subscriptions. + {"rooms": ["*"]} // Process all room subscriptions defined in the Room Subscription API. (This is the default.) + + Args: + requested_lists: The `lists` from the extension request. + requested_room_ids: The `rooms` from the extension request. + actual_lists: The actual lists from the Sliding Sync response. + actual_room_ids: The actual room subscriptions from the Sliding Sync request. 
+ """ + + # We only want to include account data for rooms that are already in the sliding + # sync response AND that were requested in the account data request. + relevant_room_ids: Set[str] = set() + + # See what rooms from the room subscriptions we should get account data for + if requested_room_ids is not None: + for room_id in requested_room_ids: + # A wildcard means we process all rooms from the room subscriptions + if room_id == "*": + relevant_room_ids.update(actual_room_ids) + break + + if room_id in actual_room_ids: + relevant_room_ids.add(room_id) + + # See what rooms from the sliding window lists we should get account data for + if requested_lists is not None: + for list_key in requested_lists: + # Just some typing because we share the variable name in multiple places + actual_list: Optional[SlidingSyncResult.SlidingWindowList] = None + + # A wildcard means we process rooms from all lists + if list_key == "*": + for actual_list in actual_lists.values(): + # We only expect a single SYNC operation for any list + assert len(actual_list.ops) == 1 + sync_op = actual_list.ops[0] + assert sync_op.op == OperationType.SYNC + + relevant_room_ids.update(sync_op.room_ids) + + break + + actual_list = actual_lists.get(list_key) + if actual_list is not None: + # We only expect a single SYNC operation for any list + assert len(actual_list.ops) == 1 + sync_op = actual_list.ops[0] + assert sync_op.op == OperationType.SYNC + + relevant_room_ids.update(sync_op.room_ids) + + return relevant_room_ids + + @trace + async def get_to_device_extension_response( + self, + sync_config: SlidingSyncConfig, + to_device_request: SlidingSyncConfig.Extensions.ToDeviceExtension, + to_token: StreamToken, + ) -> Optional[SlidingSyncResult.Extensions.ToDeviceExtension]: + """Handle to-device extension (MSC3885) + + Args: + sync_config: Sync configuration + to_device_request: The to-device extension from the request + to_token: The point in the stream to sync up to. + """ + user_id = sync_config.user.to_string() + device_id = sync_config.requester.device_id + + # Skip if the extension is not enabled + if not to_device_request.enabled: + return None + + # Check that this request has a valid device ID (not all requests have + # to belong to a device, and so device_id is None) + if device_id is None: + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=f"{to_token.to_device_key}", + events=[], + ) + + since_stream_id = 0 + if to_device_request.since is not None: + # We've already validated this is an int. + since_stream_id = int(to_device_request.since) + + if to_token.to_device_key < since_stream_id: + # The since token is ahead of our current token, so we return an + # empty response. + logger.warning( + "Got to-device.since from the future. since token: %r is ahead of our current to_device stream position: %r", + since_stream_id, + to_token.to_device_key, + ) + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=to_device_request.since, + events=[], + ) + + # Delete everything before the given since token, as we know the + # device must have received them. 
+ deleted = await self.store.delete_messages_for_device( + user_id=user_id, + device_id=device_id, + up_to_stream_id=since_stream_id, + ) + + logger.debug( + "Deleted %d to-device messages up to %d for %s", + deleted, + since_stream_id, + user_id, + ) + + messages, stream_id = await self.store.get_messages_for_device( + user_id=user_id, + device_id=device_id, + from_stream_id=since_stream_id, + to_stream_id=to_token.to_device_key, + limit=min(to_device_request.limit, 100), # Limit to at most 100 events + ) + + return SlidingSyncResult.Extensions.ToDeviceExtension( + next_batch=f"{stream_id}", + events=messages, + ) + + @trace + async def get_e2ee_extension_response( + self, + sync_config: SlidingSyncConfig, + e2ee_request: SlidingSyncConfig.Extensions.E2eeExtension, + to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken], + ) -> Optional[SlidingSyncResult.Extensions.E2eeExtension]: + """Handle E2EE device extension (MSC3884) + + Args: + sync_config: Sync configuration + e2ee_request: The e2ee extension from the request + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. + """ + user_id = sync_config.user.to_string() + device_id = sync_config.requester.device_id + + # Skip if the extension is not enabled + if not e2ee_request.enabled: + return None + + device_list_updates: Optional[DeviceListUpdates] = None + if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` + device_list_updates = await self.device_handler.get_user_ids_changed( + user_id=user_id, + from_token=from_token.stream_token, + ) + + device_one_time_keys_count: Mapping[str, int] = {} + device_unused_fallback_key_types: Sequence[str] = [] + if device_id: + # TODO: We should have a way to let clients differentiate between the states of: + # * no change in OTK count since the provided since token + # * the server has zero OTKs left for this device + # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 + device_one_time_keys_count = await self.store.count_e2e_one_time_keys( + user_id, device_id + ) + device_unused_fallback_key_types = ( + await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) + ) + + return SlidingSyncResult.Extensions.E2eeExtension( + device_list_updates=device_list_updates, + device_one_time_keys_count=device_one_time_keys_count, + device_unused_fallback_key_types=device_unused_fallback_key_types, + ) + + @trace + async def get_account_data_extension_response( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + new_connection_state: "MutablePerConnectionState", + actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList], + actual_room_ids: Set[str], + account_data_request: SlidingSyncConfig.Extensions.AccountDataExtension, + to_token: StreamToken, + from_token: Optional[SlidingSyncStreamToken], + ) -> Optional[SlidingSyncResult.Extensions.AccountDataExtension]: + """Handle Account Data extension (MSC3959) + + Args: + sync_config: Sync configuration + actual_lists: Sliding window API. A map of list key to list results in the + Sliding Sync response. + actual_room_ids: The actual room IDs in the Sliding Sync response. + account_data_request: The account_data extension from the request + to_token: The point in the stream to sync up to. + from_token: The point in the stream to sync from. 
+ """ + user_id = sync_config.user.to_string() + + # Skip if the extension is not enabled + if not account_data_request.enabled: + return None + + global_account_data_map: Mapping[str, JsonMapping] = {} + if from_token is not None: + # TODO: This should take into account the `from_token` and `to_token` + global_account_data_map = ( + await self.store.get_updated_global_account_data_for_user( + user_id, from_token.stream_token.account_data_key + ) + ) + + # TODO: This should take into account the `from_token` and `to_token` + have_push_rules_changed = await self.store.have_push_rules_changed_for_user( + user_id, from_token.stream_token.push_rules_key + ) + if have_push_rules_changed: + # TODO: This should take into account the `from_token` and `to_token` + global_account_data_map[ + AccountDataTypes.PUSH_RULES + ] = await self.push_rules_handler.push_rules_for_user(sync_config.user) + else: + # TODO: This should take into account the `to_token` + immutable_global_account_data_map = ( + await self.store.get_global_account_data_for_user(user_id) + ) + + # Use a `ChainMap` to avoid copying the immutable data from the cache + global_account_data_map = ChainMap( + { + # TODO: This should take into account the `to_token` + AccountDataTypes.PUSH_RULES: await self.push_rules_handler.push_rules_for_user( + sync_config.user + ) + }, + # Cast is safe because `ChainMap` only mutates the top-most map, + # see https://github.com/python/typeshed/issues/8430 + cast( + MutableMapping[str, JsonMapping], immutable_global_account_data_map + ), + ) + + # Fetch room account data + # + account_data_by_room_map: MutableMapping[str, Mapping[str, JsonMapping]] = {} + relevant_room_ids = self.find_relevant_room_ids_for_extension( + requested_lists=account_data_request.lists, + requested_room_ids=account_data_request.rooms, + actual_lists=actual_lists, + actual_room_ids=actual_room_ids, + ) + if len(relevant_room_ids) > 0: + # We need to handle the different cases depending on if we have sent + # down account data previously or not, so we split the relevant + # rooms up into different collections based on status. + live_rooms = set() + previously_rooms: Dict[str, int] = {} + initial_rooms = set() + + for room_id in relevant_room_ids: + if not from_token: + initial_rooms.add(room_id) + continue + + room_status = previous_connection_state.account_data.have_sent_room( + room_id + ) + if room_status.status == HaveSentRoomFlag.LIVE: + live_rooms.add(room_id) + elif room_status.status == HaveSentRoomFlag.PREVIOUSLY: + assert room_status.last_token is not None + previously_rooms[room_id] = room_status.last_token + elif room_status.status == HaveSentRoomFlag.NEVER: + initial_rooms.add(room_id) + else: + assert_never(room_status.status) + + # We fetch all room account data since the from_token. This is so + # that we can record which rooms have updates that haven't been sent + # down. + # + # Mapping from room_id to mapping of `type` to `content` of room account + # data events. 
+ all_updates_since_the_from_token: Mapping[
+ str, Mapping[str, JsonMapping]
+ ] = {}
+ if from_token is not None:
+ # TODO: This should take into account the `from_token` and `to_token`
+ all_updates_since_the_from_token = (
+ await self.store.get_updated_room_account_data_for_user(
+ user_id, from_token.stream_token.account_data_key
+ )
+ )
+
+ # Add room tags
+ #
+ # TODO: This should take into account the `from_token` and `to_token`
+ tags_by_room = await self.store.get_updated_tags(
+ user_id, from_token.stream_token.account_data_key
+ )
+ for room_id, tags in tags_by_room.items():
+ all_updates_since_the_from_token.setdefault(room_id, {})[
+ AccountDataTypes.TAG
+ ] = {"tags": tags}
+
+ # For live rooms we just get the updates from `all_updates_since_the_from_token`
+ if live_rooms:
+ for room_id in all_updates_since_the_from_token.keys() & live_rooms:
+ account_data_by_room_map[room_id] = (
+ all_updates_since_the_from_token[room_id]
+ )
+
+ # For previously and initial rooms we query each room individually.
+ if previously_rooms or initial_rooms:
+
+ async def handle_previously(room_id: str) -> None:
+ # Either get updates or all account data in the room
+ # depending on if the room state is PREVIOUSLY or NEVER.
+ previous_token = previously_rooms.get(room_id)
+ if previous_token is not None:
+ room_account_data = await (
+ self.store.get_updated_room_account_data_for_user_for_room(
+ user_id=user_id,
+ room_id=room_id,
+ from_stream_id=previous_token,
+ to_stream_id=to_token.account_data_key,
+ )
+ )
+
+ # Add room tags
+ changed = await self.store.has_tags_changed_for_room(
+ user_id=user_id,
+ room_id=room_id,
+ from_stream_id=previous_token,
+ to_stream_id=to_token.account_data_key,
+ )
+ if changed:
+ # XXX: Ideally, this should take into account the `to_token`
+ # and return the set of tags at that time but we don't track
+ # changes to tags so we just have to return all tags for the
+ # room.
+ immutable_tag_map = await self.store.get_tags_for_room(
+ user_id, room_id
+ )
+ room_account_data[AccountDataTypes.TAG] = {
+ "tags": immutable_tag_map
+ }
+
+ # Only add an entry if there were any updates.
+ if room_account_data:
+ account_data_by_room_map[room_id] = room_account_data
+ else:
+ # TODO: This should take into account the `to_token`
+ immutable_room_account_data = (
+ await self.store.get_account_data_for_room(user_id, room_id)
+ )
+
+ # Add room tags
+ #
+ # XXX: Ideally, this should take into account the `to_token`
+ # and return the set of tags at that time but we don't track
+ # changes to tags so we just have to return all tags for the
+ # room.
+ immutable_tag_map = await self.store.get_tags_for_room(
+ user_id, room_id
+ )
+
+ account_data_by_room_map[room_id] = ChainMap(
+ {AccountDataTypes.TAG: {"tags": immutable_tag_map}}
+ if immutable_tag_map
+ else {},
+ # Cast is safe because `ChainMap` only mutates the top-most map,
+ # see https://github.com/python/typeshed/issues/8430
+ cast(
+ MutableMapping[str, JsonMapping],
+ immutable_room_account_data,
+ ),
+ )
+
+ # We handle these rooms concurrently to speed it up.
+ await concurrently_execute(
+ handle_previously,
+ previously_rooms.keys() | initial_rooms,
+ limit=20,
+ )
+
+ # Now record which rooms are now up to date, and which rooms have
+ # pending updates to send.
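#
# --- Illustrative sketch (editorial aside, not part of the diff) ---
# What the bookkeeping below amounts to: rooms we sent flip to LIVE, while
# rooms that had updates we did not send get pinned at `from_token` so a later
# request only has to fill the gap. The tracker shape is a simplified
# assumption, not the real `PerConnectionState` API.

from typing import Dict, Iterable, Set

class ToyConnectionTracker:
    def __init__(self) -> None:
        self.live: Set[str] = set()
        self.previously: Dict[str, int] = {}  # room_id -> sent-up-to token

    def record_sent_rooms(self, room_ids: Iterable[str]) -> None:
        for room_id in room_ids:
            self.previously.pop(room_id, None)
            self.live.add(room_id)

    def record_unsent_rooms(self, room_ids: Iterable[str], from_token: int) -> None:
        for room_id in room_ids:
            if room_id in self.live or room_id in self.previously:
                self.live.discard(room_id)
                # Keep the older token if the room was already behind.
                self.previously.setdefault(room_id, from_token)

tracker = ToyConnectionTracker()
tracker.record_sent_rooms({"!a", "!b"})            # sent this response
tracker.record_unsent_rooms({"!b"}, from_token=7)  # update we skipped
assert tracker.live == {"!a"} and tracker.previously == {"!b": 7}
# --- end sketch ---
#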
+ new_connection_state.account_data.record_sent_rooms(previously_rooms.keys())
+ new_connection_state.account_data.record_sent_rooms(initial_rooms)
+ missing_updates = (
+ all_updates_since_the_from_token.keys() - relevant_room_ids
+ )
+ if missing_updates:
+ # If we have missing updates then we must have had a from_token.
+ assert from_token is not None
+
+ new_connection_state.account_data.record_unsent_rooms(
+ missing_updates, from_token.stream_token.account_data_key
+ )
+
+ return SlidingSyncResult.Extensions.AccountDataExtension(
+ global_account_data_map=global_account_data_map,
+ account_data_by_room_map=account_data_by_room_map,
+ )
+
+ @trace
+ async def get_receipts_extension_response(
+ self,
+ sync_config: SlidingSyncConfig,
+ previous_connection_state: "PerConnectionState",
+ new_connection_state: "MutablePerConnectionState",
+ actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+ actual_room_ids: Set[str],
+ actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
+ receipts_request: SlidingSyncConfig.Extensions.ReceiptsExtension,
+ to_token: StreamToken,
+ from_token: Optional[SlidingSyncStreamToken],
+ ) -> Optional[SlidingSyncResult.Extensions.ReceiptsExtension]:
+ """Handle Receipts extension (MSC3960)
+
+ Args:
+ sync_config: Sync configuration
+ previous_connection_state: The current per-connection state
+ new_connection_state: A mutable copy of the per-connection
+ state, used to record updates to the state.
+ actual_lists: Sliding window API. A map of list key to list results in the
+ Sliding Sync response.
+ actual_room_ids: The actual room IDs in the Sliding Sync response.
+ actual_room_response_map: A map of room ID to room results in the
+ Sliding Sync response.
+ receipts_request: The receipts extension from the request
+ to_token: The point in the stream to sync up to.
+ from_token: The point in the stream to sync from.
+ """
+ # Skip if the extension is not enabled
+ if not receipts_request.enabled:
+ return None
+
+ relevant_room_ids = self.find_relevant_room_ids_for_extension(
+ requested_lists=receipts_request.lists,
+ requested_room_ids=receipts_request.rooms,
+ actual_lists=actual_lists,
+ actual_room_ids=actual_room_ids,
+ )
+
+ room_id_to_receipt_map: Dict[str, JsonMapping] = {}
+ if len(relevant_room_ids) > 0:
+ # We need to handle the different cases depending on if we have sent
+ # down receipts previously or not, so we split the relevant rooms
+ # up into different collections based on status.
+ live_rooms = set()
+ previously_rooms: Dict[str, MultiWriterStreamToken] = {}
+ initial_rooms = set()
+
+ for room_id in relevant_room_ids:
+ if not from_token:
+ initial_rooms.add(room_id)
+ continue
+
+ # If we're sending down the room from scratch again for some
+ # reason, we should always resend the receipts as well
+ # (regardless of if we've sent them down before). This is to
+ # mimic the behaviour of what happens on initial sync, where you
+ # get a chunk of timeline with all of the corresponding receipts
+ # for the events in the timeline.
+ #
+ # We also resend down receipts when we "expand" the timeline,
+ # (see the "XXX: Odd behavior" in
+ # `synapse.handlers.sliding_sync`).
+ room_result = actual_room_response_map.get(room_id)
+ if room_result is not None:
+ if room_result.initial or room_result.unstable_expanded_timeline:
+ initial_rooms.add(room_id)
+ continue
+
+ room_status = previous_connection_state.receipts.have_sent_room(room_id)
+ if room_status.status == HaveSentRoomFlag.LIVE:
+ live_rooms.add(room_id)
+ elif room_status.status == HaveSentRoomFlag.PREVIOUSLY:
+ assert room_status.last_token is not None
+ previously_rooms[room_id] = room_status.last_token
+ elif room_status.status == HaveSentRoomFlag.NEVER:
+ initial_rooms.add(room_id)
+ else:
+ assert_never(room_status.status)
+
+ # The set of receipts that we fetched. Private receipts need to be
+ # filtered out before returning.
+ fetched_receipts = []
+
+ # For live rooms we just fetch all receipts in those rooms since the
+ # `since` token.
+ if live_rooms:
+ assert from_token is not None
+ receipts = await self.store.get_linearized_receipts_for_rooms(
+ room_ids=live_rooms,
+ from_key=from_token.stream_token.receipt_key,
+ to_key=to_token.receipt_key,
+ )
+ fetched_receipts.extend(receipts)
+
+ # For rooms we've previously sent down, but aren't up to date, we
+ # need to use the from token from the room status.
+ if previously_rooms:
+ # Fetch any missing rooms concurrently.
+
+ async def handle_previously_room(room_id: str) -> None:
+ receipt_token = previously_rooms[room_id]
+ # TODO: Limit the number of receipts we're about to send down
+ # for the room, if it's too many we should TODO
+ previously_receipts = (
+ await self.store.get_linearized_receipts_for_room(
+ room_id=room_id,
+ from_key=receipt_token,
+ to_key=to_token.receipt_key,
+ )
+ )
+ fetched_receipts.extend(previously_receipts)
+
+ await concurrently_execute(
+ handle_previously_room, previously_rooms.keys(), 20
+ )
+
+ if initial_rooms:
+ # We also always send down receipts for the current user.
+ user_receipts = (
+ await self.store.get_linearized_receipts_for_user_in_rooms(
+ user_id=sync_config.user.to_string(),
+ room_ids=initial_rooms,
+ to_key=to_token.receipt_key,
+ )
+ )
+
+ # For rooms we haven't previously sent down, we could send all receipts
+ # from that room but we only want to include receipts for events
+ # in the timeline to avoid bloating and blowing up the sync response
+ # as the number of users in the room increases.
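#
# --- Illustrative sketch (editorial aside, not part of the diff) ---
# Why the pairing below is built per (room_id, event_id): only receipts on
# events actually present in the returned timeline are kept, so the payload
# scales with the timeline rather than the room's member count. Toy data.

from typing import Dict, List, Tuple

timelines: Dict[str, List[str]] = {
    "!a": ["$e1", "$e2"],  # events being sent down for room !a
    "!b": ["$e9"],
}
all_receipts: Dict[Tuple[str, str], dict] = {
    ("!a", "$e1"): {"m.read": {"@u1": {}}},
    ("!a", "$e0"): {"m.read": {"@u2": {}}},  # old event, not in timeline
    ("!b", "$e9"): {"m.read": {"@u3": {}}},
}

wanted = [
    (room_id, event_id)
    for room_id, events in timelines.items()
    for event_id in events
]
selected = {pair: all_receipts[pair] for pair in wanted if pair in all_receipts}
# The receipt on $e0 is dropped: that event is not in the timeline we return.
assert set(selected) == {("!a", "$e1"), ("!b", "$e9")}
# --- end sketch ---
#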
+ initial_rooms_and_event_ids = [
+ (room_id, event.event_id)
+ for room_id in initial_rooms
+ if room_id in actual_room_response_map
+ for event in actual_room_response_map[room_id].timeline_events
+ ]
+ initial_receipts = await self.store.get_linearized_receipts_for_events(
+ room_and_event_ids=initial_rooms_and_event_ids,
+ )
+
+ # Combine the receipts for a room and add them to
+ # `fetched_receipts`
+ for room_id in initial_receipts.keys() | user_receipts.keys():
+ receipt_content = ReceiptInRoom.merge_to_content(
+ list(
+ itertools.chain(
+ initial_receipts.get(room_id, []),
+ user_receipts.get(room_id, []),
+ )
+ )
+ )
+
+ fetched_receipts.append(
+ {
+ "room_id": room_id,
+ "type": EduTypes.RECEIPT,
+ "content": receipt_content,
+ }
+ )
+
+ fetched_receipts = ReceiptEventSource.filter_out_private_receipts(
+ fetched_receipts, sync_config.user.to_string()
+ )
+
+ for receipt in fetched_receipts:
+ # These fields should exist for every receipt
+ room_id = receipt["room_id"]
+ type = receipt["type"]
+ content = receipt["content"]
+
+ room_id_to_receipt_map[room_id] = {"type": type, "content": content}
+
+ # Update the per-connection state to track which rooms we have sent
+ # all the receipts for.
+ new_connection_state.receipts.record_sent_rooms(previously_rooms.keys())
+ new_connection_state.receipts.record_sent_rooms(initial_rooms)
+
+ if from_token:
+ # Now find the set of rooms that may have receipts that we're not sending
+ # down. We only need to check rooms that we have previously returned
+ # receipts for (in `previous_connection_state`) because we only care about
+ # updating `LIVE` rooms to `PREVIOUSLY`. The `PREVIOUSLY` rooms will just
+ # stay pointing at their previous position so we don't need to waste time
+ # checking those and since we default to `NEVER`, rooms that were `NEVER`
+ # sent before don't need to be recorded as we'll handle them correctly when
+ # they come into range for the first time.
+ rooms_no_receipts = [
+ room_id
+ for room_id, room_status in previous_connection_state.receipts._statuses.items()
+ if room_status.status == HaveSentRoomFlag.LIVE
+ and room_id not in relevant_room_ids
+ ]
+ changed_rooms = await self.store.get_rooms_with_receipts_between(
+ rooms_no_receipts,
+ from_key=from_token.stream_token.receipt_key,
+ to_key=to_token.receipt_key,
+ )
+ new_connection_state.receipts.record_unsent_rooms(
+ changed_rooms, from_token.stream_token.receipt_key
+ )
+
+ return SlidingSyncResult.Extensions.ReceiptsExtension(
+ room_id_to_receipt_map=room_id_to_receipt_map,
+ )
+
+ async def get_typing_extension_response(
+ self,
+ sync_config: SlidingSyncConfig,
+ actual_lists: Mapping[str, SlidingSyncResult.SlidingWindowList],
+ actual_room_ids: Set[str],
+ actual_room_response_map: Mapping[str, SlidingSyncResult.RoomResult],
+ typing_request: SlidingSyncConfig.Extensions.TypingExtension,
+ to_token: StreamToken,
+ from_token: Optional[SlidingSyncStreamToken],
+ ) -> Optional[SlidingSyncResult.Extensions.TypingExtension]:
+ """Handle Typing Notification extension (MSC3961)
+
+ Args:
+ sync_config: Sync configuration
+ actual_lists: Sliding window API. A map of list key to list results in the
+ Sliding Sync response.
+ actual_room_ids: The actual room IDs in the Sliding Sync response.
+ actual_room_response_map: A map of room ID to room results in the
+ Sliding Sync response.
+ typing_request: The typing extension from the request
+ to_token: The point in the stream to sync up to.
+ from_token: The point in the stream to sync from.
+ """
+ # Skip if the extension is not enabled
+ if not typing_request.enabled:
+ return None
+
+ relevant_room_ids = self.find_relevant_room_ids_for_extension(
+ requested_lists=typing_request.lists,
+ requested_room_ids=typing_request.rooms,
+ actual_lists=actual_lists,
+ actual_room_ids=actual_room_ids,
+ )
+
+ room_id_to_typing_map: Dict[str, JsonMapping] = {}
+ if len(relevant_room_ids) > 0:
+ # Note: We don't need to take connection tracking into account for typing
+ # notifications because they'll get anything that's still relevant and hasn't
+ # timed out when the room comes into range. We consider the gap where the room
+ # fell out of range to be long enough for any typing notifications to have
+ # timed out (it's not worth the 30 seconds of data we may have missed).
+ typing_source = self.event_sources.sources.typing
+ typing_notifications, _ = await typing_source.get_new_events(
+ user=sync_config.user,
+ from_key=(from_token.stream_token.typing_key if from_token else 0),
+ to_key=to_token.typing_key,
+ # This is a dummy value and isn't used in the function
+ limit=0,
+ room_ids=relevant_room_ids,
+ is_guest=False,
+ )
+
+ for typing_notification in typing_notifications:
+ # These fields should exist for every typing notification
+ room_id = typing_notification["room_id"]
+ type = typing_notification["type"]
+ content = typing_notification["content"]
+
+ room_id_to_typing_map[room_id] = {"type": type, "content": content}
+
+ return SlidingSyncResult.Extensions.TypingExtension(
+ room_id_to_typing_map=room_id_to_typing_map,
+ )
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py
new file mode 100644
index 0000000000..13e69f18a0 --- /dev/null +++ b/synapse/handlers/sliding_sync/room_lists.py
@@ -0,0 +1,2304 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + + +import enum +import logging +from itertools import chain +from typing import ( + TYPE_CHECKING, + AbstractSet, + Dict, + List, + Literal, + Mapping, + Optional, + Set, + Tuple, + Union, + cast, +) + +import attr +from immutabledict import immutabledict +from typing_extensions import assert_never + +from synapse.api.constants import ( + AccountDataTypes, + EventContentFields, + EventTypes, + Membership, +) +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import StrippedStateEvent +from synapse.events.utils import parse_stripped_state_event +from synapse.logging.opentracing import start_active_span, trace +from synapse.storage.databases.main.state import ( + ROOM_UNKNOWN_SENTINEL, + Sentinel as StateSentinel, +) +from synapse.storage.databases.main.stream import CurrentStateDeltaMembership +from synapse.storage.invite_rule import InviteRule +from synapse.storage.roommember import ( + RoomsForUser, + RoomsForUserSlidingSync, + RoomsForUserStateReset, +) +from synapse.types import ( + MutableStateMap, + RoomStreamToken, + StateMap, + StrCollection, + StreamKeyType, + StreamToken, + UserID, +) +from synapse.types.handlers.sliding_sync import ( + HaveSentRoomFlag, + OperationType, + PerConnectionState, + RoomSyncConfig, + SlidingSyncConfig, + SlidingSyncResult, +) +from synapse.types.state import StateFilter + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +logger = logging.getLogger(__name__) + + +class Sentinel(enum.Enum): + # defining a sentinel in this way allows mypy to correctly handle the + # type of a dictionary lookup and subsequent type narrowing. + UNSET_SENTINEL = object() + + +# Helper definition for the types that we might return. We do this to avoid +# copying data between types (which can be expensive for many rooms). +RoomsForUserType = Union[RoomsForUserStateReset, RoomsForUser, RoomsForUserSlidingSync] + + +@attr.s(auto_attribs=True, slots=True, frozen=True) +class SlidingSyncInterestedRooms: + """The set of rooms and metadata a client is interested in based on their + sliding sync request. + + Returned by `compute_interested_rooms`. + + Attributes: + lists: A mapping from list name to the list result for the response + relevant_room_map: A map from rooms that match the sync request to + their room sync config. + relevant_rooms_to_send_map: Subset of `relevant_room_map` that + includes the rooms that *may* have relevant updates. Rooms not + in this map will definitely not have room updates (though + extensions may have updates in these rooms). + newly_joined_rooms: The set of rooms that were joined in the token range + and the user is still joined to at the end of this range. + newly_left_rooms: The set of rooms that we left in the token range + and are still "leave" at the end of this range. 
+ dm_room_ids: The set of rooms the user considers as direct-message (DM) rooms
+ """
+
+ lists: Mapping[str, SlidingSyncResult.SlidingWindowList]
+ relevant_room_map: Mapping[str, RoomSyncConfig]
+ relevant_rooms_to_send_map: Mapping[str, RoomSyncConfig]
+ all_rooms: Set[str]
+ room_membership_for_user_map: Mapping[str, RoomsForUserType]
+
+ newly_joined_rooms: AbstractSet[str]
+ newly_left_rooms: AbstractSet[str]
+ dm_room_ids: AbstractSet[str]
+
+ @staticmethod
+ def empty() -> "SlidingSyncInterestedRooms":
+ return SlidingSyncInterestedRooms(
+ lists={},
+ relevant_room_map={},
+ relevant_rooms_to_send_map={},
+ all_rooms=set(),
+ room_membership_for_user_map={},
+ newly_joined_rooms=set(),
+ newly_left_rooms=set(),
+ dm_room_ids=set(),
+ )
+
+
+ def filter_membership_for_sync(
+ *,
+ user_id: str,
+ room_membership_for_user: RoomsForUserType,
+ newly_left: bool,
+ ) -> bool:
+ """
+ Returns True if the membership event should be included in the sync response,
+ otherwise False.
+
+ Args:
+ user_id: The user ID that the membership applies to
+ room_membership_for_user: Membership information for the user in the room
+ newly_left: Whether the user newly left the room in the token range
+ """
+
+ membership = room_membership_for_user.membership
+ sender = room_membership_for_user.sender
+
+ # We want to allow everything except rooms the user has left unless `newly_left`
+ # because we want everything that's *still* relevant to the user. We include
+ # `newly_left` rooms because the last event that the user should see is their own
+ # leave event.
+ #
+ # A leave != kick. This logic includes kicks (leave events where the sender is not
+ # the same user).
+ #
+ # When `sender=None`, it means that a state reset happened that removed the user
+ # from the room without a corresponding leave event. We can just remove the rooms
+ # since they are no longer relevant to the user but will still appear if they are
+ # `newly_left`.
+ return (
+ # Anything except leave events
+ membership != Membership.LEAVE
+ # Unless...
+ or newly_left + # Allow kicks + or (membership == Membership.LEAVE and sender not in (user_id, None)) + ) + + +class SlidingSyncRoomLists: + """Handles calculating the room lists from sliding sync requests""" + + def __init__(self, hs: "HomeServer"): + self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync + self.is_mine_id = hs.is_mine_id + + async def compute_interested_rooms( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> SlidingSyncInterestedRooms: + """Fetch the set of rooms that match the request""" + has_lists = sync_config.lists is not None and len(sync_config.lists) > 0 + has_room_subscriptions = ( + sync_config.room_subscriptions is not None + and len(sync_config.room_subscriptions) > 0 + ) + + if not has_lists and not has_room_subscriptions: + return SlidingSyncInterestedRooms.empty() + + if await self.store.have_finished_sliding_sync_background_jobs(): + return await self._compute_interested_rooms_new_tables( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + to_token=to_token, + from_token=from_token, + ) + else: + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + return await self._compute_interested_rooms_fallback( + sync_config=sync_config, + previous_connection_state=previous_connection_state, + to_token=to_token, + from_token=from_token, + ) + + @trace + async def _compute_interested_rooms_new_tables( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> SlidingSyncInterestedRooms: + """Implementation of `compute_interested_rooms` using new sliding sync db tables.""" + user_id = sync_config.user.to_string() + + # Assemble sliding window lists + lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} + # Keep track of the rooms that we can display and need to fetch more info about + relevant_room_map: Dict[str, RoomSyncConfig] = {} + # The set of room IDs of all rooms that could appear in any list. These + # include rooms that are outside the list ranges. + all_rooms: Set[str] = set() + + # Note: this won't include rooms the user has left themselves. We add back + # `newly_left` rooms below. This is more efficient than fetching all rooms and + # then filtering out the old left rooms. + room_membership_for_user_map = ( + await self.store.get_sliding_sync_rooms_for_user_from_membership_snapshots( + user_id + ) + ) + # To play nice with the rewind logic below, we need to go fetch the rooms the + # user has left themselves but only if it changed after the `to_token`. + # + # If a leave happens *after* the token range, we may have still been joined (or + # any non-self-leave which is relevant to sync) to the room before so we need to + # include it in the list of potentially relevant rooms and apply our rewind + # logic (outside of this function) to see if it's actually relevant. 
+ # + # We do this separately from + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` as those results + # are cached and the `to_token` isn't very cache friendly (people are constantly + # requesting with new tokens) so we separate it out here. + self_leave_room_membership_for_user_map = ( + await self.store.get_sliding_sync_self_leave_rooms_after_to_token( + user_id, to_token + ) + ) + if self_leave_room_membership_for_user_map: + # FIXME: It would be nice to avoid this copy but since + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it + # can't return a mutable value like a `dict`. We make the copy to get a + # mutable dict that we can change. We try to only make a copy when necessary + # (if we actually need to change something) as in most cases, the logic + # doesn't need to run. + room_membership_for_user_map = dict(room_membership_for_user_map) + room_membership_for_user_map.update(self_leave_room_membership_for_user_map) + + # Remove invites from ignored users + ignored_users = await self.store.ignored_users(user_id) + invite_config = await self.store.get_invite_config_for_user(user_id) + if ignored_users: + # FIXME: It would be nice to avoid this copy but since + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it + # can't return a mutable value like a `dict`. We make the copy to get a + # mutable dict that we can change. We try to only make a copy when necessary + # (if we actually need to change something) as in most cases, the logic + # doesn't need to run. + room_membership_for_user_map = dict(room_membership_for_user_map) + # Make a copy so we don't run into an error: `dictionary changed size during + # iteration`, when we remove items + for room_id in list(room_membership_for_user_map.keys()): + room_for_user_sliding_sync = room_membership_for_user_map[room_id] + if ( + room_for_user_sliding_sync.membership == Membership.INVITE + and room_for_user_sliding_sync.sender + and ( + room_for_user_sliding_sync.sender in ignored_users + or invite_config.get_invite_rule( + room_for_user_sliding_sync.sender + ) + == InviteRule.IGNORE + ) + ): + room_membership_for_user_map.pop(room_id, None) + + ( + newly_joined_room_ids, + newly_left_room_map, + ) = await self._get_newly_joined_and_left_rooms( + user_id, from_token=from_token, to_token=to_token + ) + + changes = await self._get_rewind_changes_to_current_membership_to_token( + sync_config.user, room_membership_for_user_map, to_token=to_token + ) + if changes: + # FIXME: It would be nice to avoid this copy but since + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it + # can't return a mutable value like a `dict`. We make the copy to get a + # mutable dict that we can change. We try to only make a copy when necessary + # (if we actually need to change something) as in most cases, the logic + # doesn't need to run. 
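#
# --- Illustrative sketch (editorial aside, not part of the diff) ---
# The copy-on-write pattern the FIXME comments describe: a cached lookup
# returns a shared, effectively-immutable mapping, so we only pay for a
# `dict(...)` copy on the rare paths that actually mutate the result.

from types import MappingProxyType
from typing import Mapping, MutableMapping

def cached_lookup() -> Mapping[str, int]:
    # Stand-in for a cached store method whose result is shared by callers.
    return MappingProxyType({"!a": 1, "!b": 2})

result: Mapping[str, int] = cached_lookup()
needs_change = True  # e.g. an invite from an ignored user must be dropped
if needs_change:
    mutable: MutableMapping[str, int] = dict(result)  # copy only when needed
    mutable.pop("!b", None)
    result = mutable
assert dict(result) == {"!a": 1}
# --- end sketch ---
#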
+ room_membership_for_user_map = dict(room_membership_for_user_map) + for room_id, change in changes.items(): + if change is None: + # Remove rooms that the user joined after the `to_token` + room_membership_for_user_map.pop(room_id, None) + continue + + existing_room = room_membership_for_user_map.get(room_id) + if existing_room is not None: + # Update room membership events to the point in time of the `to_token` + room_for_user = RoomsForUserSlidingSync( + room_id=room_id, + sender=change.sender, + membership=change.membership, + event_id=change.event_id, + event_pos=change.event_pos, + room_version_id=change.room_version_id, + # We keep the state of the room though + has_known_state=existing_room.has_known_state, + room_type=existing_room.room_type, + is_encrypted=existing_room.is_encrypted, + ) + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_for_user, + newly_left=room_id in newly_left_room_map, + ): + room_membership_for_user_map[room_id] = room_for_user + else: + room_membership_for_user_map.pop(room_id, None) + + # Add back `newly_left` rooms (rooms left in the from -> to token range). + # + # We do this because `get_sliding_sync_rooms_for_user_from_membership_snapshots(...)` doesn't include + # rooms that the user left themselves as it's more efficient to add them back + # here than to fetch all rooms and then filter out the old left rooms. The user + # only leaves a room once in a blue moon so this barely needs to run. + # + missing_newly_left_rooms = ( + newly_left_room_map.keys() - room_membership_for_user_map.keys() + ) + if missing_newly_left_rooms: + # FIXME: It would be nice to avoid this copy but since + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it + # can't return a mutable value like a `dict`. We make the copy to get a + # mutable dict that we can change. We try to only make a copy when necessary + # (if we actually need to change something) as in most cases, the logic + # doesn't need to run. 
+ room_membership_for_user_map = dict(room_membership_for_user_map)
+ for room_id in missing_newly_left_rooms:
+ newly_left_room_for_user = newly_left_room_map[room_id]
+ # This should be a given
+ assert newly_left_room_for_user.membership == Membership.LEAVE
+
+ # Add back `newly_left` rooms
+ #
+ # Check for membership and state in the Sliding Sync tables as it's just
+ # another membership
+ newly_left_room_for_user_sliding_sync = (
+ await self.store.get_sliding_sync_room_for_user(user_id, room_id)
+ )
+ # If the membership exists, it's just a normal case of the user leaving
+ # the room on their own
+ if newly_left_room_for_user_sliding_sync is not None:
+ if filter_membership_for_sync(
+ user_id=user_id,
+ room_membership_for_user=newly_left_room_for_user_sliding_sync,
+ newly_left=room_id in newly_left_room_map,
+ ):
+ room_membership_for_user_map[room_id] = (
+ newly_left_room_for_user_sliding_sync
+ )
+ else:
+ room_membership_for_user_map.pop(room_id, None)
+
+ change = changes.get(room_id)
+ if change is not None:
+ # Update room membership events to the point in time of the `to_token`
+ room_for_user = RoomsForUserSlidingSync(
+ room_id=room_id,
+ sender=change.sender,
+ membership=change.membership,
+ event_id=change.event_id,
+ event_pos=change.event_pos,
+ room_version_id=change.room_version_id,
+ # We keep the state of the room though
+ has_known_state=newly_left_room_for_user_sliding_sync.has_known_state,
+ room_type=newly_left_room_for_user_sliding_sync.room_type,
+ is_encrypted=newly_left_room_for_user_sliding_sync.is_encrypted,
+ )
+ if filter_membership_for_sync(
+ user_id=user_id,
+ room_membership_for_user=room_for_user,
+ newly_left=room_id in newly_left_room_map,
+ ):
+ room_membership_for_user_map[room_id] = room_for_user
+ else:
+ room_membership_for_user_map.pop(room_id, None)
+
+ # If we are `newly_left` from the room but can't find any membership,
+ # then we have been "state reset" out of the room
+ else:
+ # Get the state at the time. We can't read from the Sliding Sync
+ # tables because the user has no membership in the room according to
+ # the state (thanks to the state reset).
+ # + # Note: `room_type` never changes, so we can just get current room + # type + room_type = await self.store.get_room_type(room_id) + has_known_state = room_type is not ROOM_UNKNOWN_SENTINEL + if isinstance(room_type, StateSentinel): + room_type = None + + # Get the encryption status at the time of the token + is_encrypted = await self.get_is_encrypted_for_room_at_token( + room_id, + newly_left_room_for_user.event_pos.to_room_stream_token(), + ) + + room_for_user = RoomsForUserSlidingSync( + room_id=room_id, + sender=newly_left_room_for_user.sender, + membership=newly_left_room_for_user.membership, + event_id=newly_left_room_for_user.event_id, + event_pos=newly_left_room_for_user.event_pos, + room_version_id=newly_left_room_for_user.room_version_id, + has_known_state=has_known_state, + room_type=room_type, + is_encrypted=is_encrypted, + ) + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_for_user, + newly_left=room_id in newly_left_room_map, + ): + room_membership_for_user_map[room_id] = room_for_user + else: + room_membership_for_user_map.pop(room_id, None) + + dm_room_ids = await self._get_dm_rooms_for_user(user_id) + + if sync_config.lists: + sync_room_map = room_membership_for_user_map + with start_active_span("assemble_sliding_window_lists"): + for list_key, list_config in sync_config.lists.items(): + # Apply filters + filtered_sync_room_map = sync_room_map + if list_config.filters is not None: + filtered_sync_room_map = await self.filter_rooms_using_tables( + user_id, + sync_room_map, + previous_connection_state, + list_config.filters, + to_token, + dm_room_ids, + ) + + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_rooms = await self.store.get_partial_rooms() + + # Since creating the `RoomSyncConfig` takes some work, let's just do it + # once. + room_sync_config = RoomSyncConfig.from_room_config(list_config) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + filtered_sync_room_map = { + room_id: room + for room_id, room in filtered_sync_room_map.items() + if room_id not in partial_state_rooms + } + + all_rooms.update(filtered_sync_room_map) + + ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + + if list_config.ranges: + # Optimization: If we are asking for the full range, we don't + # need to sort the list. + if ( + # We're looking for a single range that covers the entire list + len(list_config.ranges) == 1 + # Range starts at 0 + and list_config.ranges[0][0] == 0 + # And the range extends to the end of the list or more. Each + # side is inclusive. + and list_config.ranges[0][1] + >= len(filtered_sync_room_map) - 1 + ): + sorted_room_info: List[RoomsForUserType] = list( + filtered_sync_room_map.values() + ) + else: + # Sort the list + sorted_room_info = await self.sort_rooms( + # Cast is safe because RoomsForUserSlidingSync is part + # of the `RoomsForUserType` union. Why can't it detect this? + cast( + Dict[str, RoomsForUserType], filtered_sync_room_map + ), + to_token, + # We only need to sort the rooms up to the end + # of the largest range. Both sides of range are + # inclusive so we `+ 1`. 
+ limit=max(range[1] + 1 for range in list_config.ranges), + ) + + for range in list_config.ranges: + room_ids_in_list: List[str] = [] + + # We're going to loop through the sorted list of rooms starting + # at the range start index and keep adding rooms until we fill + # up the range or run out of rooms. + # + # Both sides of range are inclusive so we `+ 1` + max_num_rooms = range[1] - range[0] + 1 + for room_membership in sorted_room_info[range[0] :]: + room_id = room_membership.room_id + + if len(room_ids_in_list) >= max_num_rooms: + break + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going + # to display and need to fetch more info about. + existing_room_sync_config = relevant_room_map.get( + room_id + ) + if existing_room_sync_config is not None: + room_sync_config = existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + + relevant_room_map[room_id] = room_sync_config + + room_ids_in_list.append(room_id) + + ops.append( + SlidingSyncResult.SlidingWindowList.Operation( + op=OperationType.SYNC, + range=range, + room_ids=room_ids_in_list, + ) + ) + + lists[list_key] = SlidingSyncResult.SlidingWindowList( + count=len(filtered_sync_room_map), + ops=ops, + ) + + if sync_config.room_subscriptions: + with start_active_span("assemble_room_subscriptions"): + # FIXME: It would be nice to avoid this copy but since + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` is cached, it + # can't return a mutable value like a `dict`. We make the copy to get a + # mutable dict that we can change. We try to only make a copy when necessary + # (if we actually need to change something) as in most cases, the logic + # doesn't need to run. + room_membership_for_user_map = dict(room_membership_for_user_map) + + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_rooms = await self.store.get_partial_rooms() + + # Fetch any rooms that we have not already fetched from the database. + subscription_sliding_sync_rooms = ( + await self.store.get_sliding_sync_room_for_user_batch( + user_id, + sync_config.room_subscriptions.keys() + - room_membership_for_user_map.keys(), + ) + ) + room_membership_for_user_map.update(subscription_sliding_sync_rooms) + + for ( + room_id, + room_subscription, + ) in sync_config.room_subscriptions.items(): + # Check if we have a membership for the room, but didn't pull it out + # above. This could be e.g. a leave that we don't pull out by + # default. + current_room_entry = room_membership_for_user_map.get(room_id) + if not current_room_entry: + # TODO: Handle rooms the user isn't in. + continue + + all_rooms.add(room_id) + + # Take the superset of the `RoomSyncConfig` for each room. + room_sync_config = RoomSyncConfig.from_room_config( + room_subscription + ) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + if room_id in partial_state_rooms: + continue + + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. 
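#
# --- Illustrative sketch (editorial aside, not part of the diff) ---
# Taking the "superset" of two room sync configs when a room matches several
# lists/subscriptions, as the combining step below does. The combine rule
# sketched here (max timeline limit, union of required state) is an assumption
# for illustration; `combine_room_sync_config` is the authoritative behaviour.

from dataclasses import dataclass
from typing import FrozenSet, Tuple

@dataclass(frozen=True)
class ToyRoomSyncConfig:
    timeline_limit: int
    required_state: FrozenSet[Tuple[str, str]]  # (event_type, state_key)

    def combine(self, other: "ToyRoomSyncConfig") -> "ToyRoomSyncConfig":
        return ToyRoomSyncConfig(
            timeline_limit=max(self.timeline_limit, other.timeline_limit),
            required_state=self.required_state | other.required_state,
        )

a = ToyRoomSyncConfig(10, frozenset({("m.room.name", "")}))
b = ToyRoomSyncConfig(3, frozenset({("m.room.topic", "")}))
combined = a.combine(b)
assert combined.timeline_limit == 10
assert combined.required_state == {("m.room.name", ""), ("m.room.topic", "")}
# --- end sketch ---
#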
+ existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + room_sync_config = ( + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + ) + + relevant_room_map[room_id] = room_sync_config + + # Filtered subset of `relevant_room_map` for rooms that may have updates + # (in the event stream) + relevant_rooms_to_send_map = await self._filter_relevant_rooms_to_send( + previous_connection_state, from_token, relevant_room_map + ) + + return SlidingSyncInterestedRooms( + lists=lists, + relevant_room_map=relevant_room_map, + relevant_rooms_to_send_map=relevant_rooms_to_send_map, + all_rooms=all_rooms, + room_membership_for_user_map=room_membership_for_user_map, + newly_joined_rooms=newly_joined_room_ids, + newly_left_rooms=set(newly_left_room_map), + dm_room_ids=dm_room_ids, + ) + + async def _compute_interested_rooms_fallback( + self, + sync_config: SlidingSyncConfig, + previous_connection_state: "PerConnectionState", + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> SlidingSyncInterestedRooms: + """Fallback code when the database background updates haven't completed yet.""" + + ( + room_membership_for_user_map, + newly_joined_room_ids, + newly_left_room_ids, + ) = await self.get_room_membership_for_user_at_to_token( + sync_config.user, to_token, from_token + ) + + dm_room_ids = await self._get_dm_rooms_for_user(sync_config.user.to_string()) + + # Assemble sliding window lists + lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {} + # Keep track of the rooms that we can display and need to fetch more info about + relevant_room_map: Dict[str, RoomSyncConfig] = {} + # The set of room IDs of all rooms that could appear in any list. These + # include rooms that are outside the list ranges. + all_rooms: Set[str] = set() + + if sync_config.lists: + with start_active_span("assemble_sliding_window_lists"): + sync_room_map = await self.filter_rooms_relevant_for_sync( + user=sync_config.user, + room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left_room_ids, + ) + + for list_key, list_config in sync_config.lists.items(): + # Apply filters + filtered_sync_room_map = sync_room_map + if list_config.filters is not None: + filtered_sync_room_map = await self.filter_rooms( + sync_config.user, + sync_room_map, + previous_connection_state, + list_config.filters, + to_token, + dm_room_ids, + ) + + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_rooms = await self.store.get_partial_rooms() + + # Since creating the `RoomSyncConfig` takes some work, let's just do it + # once. 
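#
# --- Illustrative sketch (editorial aside, not part of the diff) ---
# The inclusive-range arithmetic used by the window slicing further below:
# both ends of a requested range are inclusive, hence `+ 1` when sizing the
# window and `- 1` when testing whether one range already covers the list.

rooms = ["!r0", "!r1", "!r2", "!r3", "!r4"]
requested_range = (1, 3)  # inclusive on both sides

max_num_rooms = requested_range[1] - requested_range[0] + 1
window = rooms[requested_range[0] :][:max_num_rooms]
assert max_num_rooms == 3 and window == ["!r1", "!r2", "!r3"]

# A single range (0, len - 1) or wider covers everything, which is what lets
# the full-range optimisation skip sorting entirely.
covers_all = requested_range[0] == 0 and requested_range[1] >= len(rooms) - 1
assert covers_all is False
# --- end sketch ---
#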
+ room_sync_config = RoomSyncConfig.from_room_config(list_config) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + filtered_sync_room_map = { + room_id: room + for room_id, room in filtered_sync_room_map.items() + if room_id not in partial_state_rooms + } + + all_rooms.update(filtered_sync_room_map) + + # Sort the list + sorted_room_info = await self.sort_rooms( + filtered_sync_room_map, to_token + ) + + ops: List[SlidingSyncResult.SlidingWindowList.Operation] = [] + if list_config.ranges: + for range in list_config.ranges: + room_ids_in_list: List[str] = [] + + # We're going to loop through the sorted list of rooms starting + # at the range start index and keep adding rooms until we fill + # up the range or run out of rooms. + # + # Both sides of range are inclusive so we `+ 1` + max_num_rooms = range[1] - range[0] + 1 + for room_membership in sorted_room_info[range[0] :]: + room_id = room_membership.room_id + + if len(room_ids_in_list) >= max_num_rooms: + break + + # Take the superset of the `RoomSyncConfig` for each room. + # + # Update our `relevant_room_map` with the room we're going + # to display and need to fetch more info about. + existing_room_sync_config = relevant_room_map.get( + room_id + ) + if existing_room_sync_config is not None: + room_sync_config = existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + + relevant_room_map[room_id] = room_sync_config + + room_ids_in_list.append(room_id) + + ops.append( + SlidingSyncResult.SlidingWindowList.Operation( + op=OperationType.SYNC, + range=range, + room_ids=room_ids_in_list, + ) + ) + + lists[list_key] = SlidingSyncResult.SlidingWindowList( + count=len(sorted_room_info), + ops=ops, + ) + + if sync_config.room_subscriptions: + with start_active_span("assemble_room_subscriptions"): + # Find which rooms are partially stated and may need to be filtered out + # depending on the `required_state` requested (see below). + partial_state_rooms = await self.store.get_partial_rooms() + + for ( + room_id, + room_subscription, + ) in sync_config.room_subscriptions.items(): + room_membership_for_user_at_to_token = ( + await self.check_room_subscription_allowed_for_user( + room_id=room_id, + room_membership_for_user_map=room_membership_for_user_map, + to_token=to_token, + ) + ) + + # Skip this room if the user isn't allowed to see it + if not room_membership_for_user_at_to_token: + continue + + all_rooms.add(room_id) + + room_membership_for_user_map[room_id] = ( + room_membership_for_user_at_to_token + ) + + # Take the superset of the `RoomSyncConfig` for each room. + room_sync_config = RoomSyncConfig.from_room_config( + room_subscription + ) + + # Exclude partially-stated rooms if we must wait for the room to be + # fully-stated + if room_sync_config.must_await_full_state(self.is_mine_id): + if room_id in partial_state_rooms: + continue + + all_rooms.add(room_id) + + # Update our `relevant_room_map` with the room we're going to display + # and need to fetch more info about. 
+ existing_room_sync_config = relevant_room_map.get(room_id) + if existing_room_sync_config is not None: + room_sync_config = ( + existing_room_sync_config.combine_room_sync_config( + room_sync_config + ) + ) + + relevant_room_map[room_id] = room_sync_config + + # Filtered subset of `relevant_room_map` for rooms that may have updates + # (in the event stream) + relevant_rooms_to_send_map = await self._filter_relevant_rooms_to_send( + previous_connection_state, from_token, relevant_room_map + ) + + return SlidingSyncInterestedRooms( + lists=lists, + relevant_room_map=relevant_room_map, + relevant_rooms_to_send_map=relevant_rooms_to_send_map, + all_rooms=all_rooms, + room_membership_for_user_map=room_membership_for_user_map, + newly_joined_rooms=newly_joined_room_ids, + newly_left_rooms=newly_left_room_ids, + dm_room_ids=dm_room_ids, + ) + + async def _filter_relevant_rooms_to_send( + self, + previous_connection_state: PerConnectionState, + from_token: Optional[StreamToken], + relevant_room_map: Dict[str, RoomSyncConfig], + ) -> Dict[str, RoomSyncConfig]: + """Filters the `relevant_room_map` down to those rooms that may have + updates we need to fetch and return.""" + + # Filtered subset of `relevant_room_map` for rooms that may have updates + # (in the event stream) + relevant_rooms_to_send_map: Dict[str, RoomSyncConfig] = relevant_room_map + if relevant_room_map: + with start_active_span("filter_relevant_rooms_to_send"): + if from_token: + rooms_should_send = set() + + # First we check if there are rooms that match a list/room + # subscription and have updates we need to send (i.e. either because + # we haven't sent the room down, or we have but there are missing + # updates). + for room_id, room_config in relevant_room_map.items(): + prev_room_sync_config = ( + previous_connection_state.room_configs.get(room_id) + ) + if prev_room_sync_config is not None: + # Always include rooms whose timeline limit has increased. + # (see the "XXX: Odd behavior" described below) + if ( + prev_room_sync_config.timeline_limit + < room_config.timeline_limit + ): + rooms_should_send.add(room_id) + continue + + status = previous_connection_state.rooms.have_sent_room(room_id) + if ( + # The room was never sent down before so the client needs to know + # about it regardless of any updates. + status.status == HaveSentRoomFlag.NEVER + # `PREVIOUSLY` literally means the "room was sent down before *AND* + # there are updates we haven't sent down" so we already know this + # room has updates. + or status.status == HaveSentRoomFlag.PREVIOUSLY + ): + rooms_should_send.add(room_id) + elif status.status == HaveSentRoomFlag.LIVE: + # We know that we've sent all updates up until `from_token`, + # so we just need to check if there have been updates since + # then. + pass + else: + assert_never(status.status) + + # We only need to check for new events since any state changes + # will also come down as new events. 
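#
# --- Illustrative sketch (editorial aside, not part of the diff) ---
# The decision implemented above and below: NEVER/PREVIOUSLY rooms are always
# sent; LIVE rooms are sent only if the event stream shows activity after
# `from_token`. Statuses are plain strings here for brevity.

from typing import Dict, Set

def rooms_to_send(
    relevant: Set[str], statuses: Dict[str, str], rooms_with_updates: Set[str]
) -> Set[str]:
    should_send = {
        room_id
        for room_id in relevant
        if statuses.get(room_id, "never") != "live"
    }
    # LIVE rooms are complete up to `from_token`: only send on new activity.
    should_send |= relevant & rooms_with_updates
    return should_send

out = rooms_to_send(
    {"!new", "!behind", "!quiet", "!busy"},
    {"!behind": "previously", "!quiet": "live", "!busy": "live"},
    rooms_with_updates={"!busy"},
)
assert out == {"!new", "!behind", "!busy"}
# --- end sketch ---
#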
+ rooms_that_have_updates = (
+ self.store.get_rooms_that_might_have_updates(
+ relevant_room_map.keys(), from_token.room_key
+ )
+ )
+ rooms_should_send.update(rooms_that_have_updates)
+ relevant_rooms_to_send_map = {
+ room_id: room_sync_config
+ for room_id, room_sync_config in relevant_room_map.items()
+ if room_id in rooms_should_send
+ }
+
+ return relevant_rooms_to_send_map
+
+ @trace
+ async def _get_rewind_changes_to_current_membership_to_token(
+ self,
+ user: UserID,
+ rooms_for_user: Mapping[str, RoomsForUserType],
+ to_token: StreamToken,
+ ) -> Mapping[str, Optional[RoomsForUser]]:
+ """
+ Takes the current set of rooms for a user (retrieved after the given
+ token), and returns the changes needed to "rewind" it to match the set of
+ memberships *at that token* (<= `to_token`).
+
+ Args:
+ user: User to fetch rooms for
+ rooms_for_user: The set of rooms for the user after the `to_token`.
+ to_token: The token to rewind to
+
+ Returns:
+ The changes to apply to rewind the current memberships.
+ """
+ # If the user has never joined any rooms before, we can just return an empty list
+ if not rooms_for_user:
+ return {}
+
+ user_id = user.to_string()
+
+ # Get the `RoomStreamToken` that represents the spot we queried up to when we got
+ # our membership snapshot from `get_rooms_for_local_user_where_membership_is()`.
+ #
+ # First, we need to get the max stream_ordering of each event persister instance
+ # that we queried events from.
+ instance_to_max_stream_ordering_map: Dict[str, int] = {}
+ for room_for_user in rooms_for_user.values():
+ instance_name = room_for_user.event_pos.instance_name
+ stream_ordering = room_for_user.event_pos.stream
+
+ current_instance_max_stream_ordering = (
+ instance_to_max_stream_ordering_map.get(instance_name)
+ )
+ if (
+ current_instance_max_stream_ordering is None
+ or stream_ordering > current_instance_max_stream_ordering
+ ):
+ instance_to_max_stream_ordering_map[instance_name] = stream_ordering
+
+ # Then assemble the `RoomStreamToken`
+ min_stream_pos = min(instance_to_max_stream_ordering_map.values())
+ membership_snapshot_token = RoomStreamToken(
+ # Minimum position in the `instance_map`
+ stream=min_stream_pos,
+ instance_map=immutabledict(
+ {
+ instance_name: stream_pos
+ for instance_name, stream_pos in instance_to_max_stream_ordering_map.items()
+ if stream_pos > min_stream_pos
+ }
+ ),
+ )
+
+ # Since we fetched the user's room list at some point in time after the
+ # tokens, we need to revert/rewind some membership changes to match the point in
+ # time of the `to_token`.
In particular, we need to make these fixups:
+ #
+ # - a) Remove rooms that the user joined after the `to_token`
+ # - b) Update room membership events to the point in time of the `to_token`
+
+ # Fetch membership changes that fall in the range from `to_token` up to
+ # `membership_snapshot_token`
+ #
+ # If our `to_token` is already the same or ahead of the latest room membership
+ # for the user, we don't need to do any "b)" fix-ups and can just straight-up
+ # use the room list from the snapshot as a base (nothing has changed)
+ current_state_delta_membership_changes_after_to_token = []
+ if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
+ current_state_delta_membership_changes_after_to_token = (
+ await self.store.get_current_state_delta_membership_changes_for_user(
+ user_id,
+ from_key=to_token.room_key,
+ to_key=membership_snapshot_token,
+ excluded_room_ids=self.rooms_to_exclude_globally,
+ )
+ )
+
+ if not current_state_delta_membership_changes_after_to_token:
+ # There have been no membership changes, so we can early return.
+ return {}
+
+ # Otherwise we need to return a set of changes to apply to the current
+ # memberships, so assemble a mutable dict for them.
+ changes: Dict[str, Optional[RoomsForUser]] = {}
+
+ # Assemble a list of the first membership event after the `to_token` so we can
+ # step backward to the previous membership that would apply to the from/to
+ # range.
+ first_membership_change_by_room_id_after_to_token: Dict[
+ str, CurrentStateDeltaMembership
+ ] = {}
+ for membership_change in current_state_delta_membership_changes_after_to_token:
+ # Only set if we haven't already set it
+ first_membership_change_by_room_id_after_to_token.setdefault(
+ membership_change.room_id, membership_change
+ )
+
+ # Since we fetched a snapshot of the user's room list at some point in time after
+ # the from/to tokens, we need to revert/rewind some membership changes to match
+ # the point in time of the `to_token`.
+ for (
+ room_id,
+ first_membership_change_after_to_token,
+ ) in first_membership_change_by_room_id_after_to_token.items():
+ # a) Remove rooms that the user joined after the `to_token`
+ if first_membership_change_after_to_token.prev_event_id is None:
+ changes[room_id] = None
+ # b) From the first membership event after the `to_token`, step backward to the
+ # previous membership that would apply to the from/to range.
+ else:
+ # We don't expect these fields to be `None` if we have a `prev_event_id`
+ # but we're being defensive since it's possible that the prev event was
+ # culled from the database.
+ if (
+ first_membership_change_after_to_token.prev_event_pos is not None
+ and first_membership_change_after_to_token.prev_membership
+ is not None
+ and first_membership_change_after_to_token.prev_sender is not None
+ ):
+ # We need to know the room version ID, which we can normally
+ # get from the current membership, but if we don't have
+ # that then we need to query the DB.
+ current_membership = rooms_for_user.get(room_id)
+ if current_membership is not None:
+ room_version_id = current_membership.room_version_id
+ else:
+ room_version_id = await self.store.get_room_version_id(room_id)
+
+ changes[room_id] = RoomsForUser(
+ room_id=room_id,
+ event_id=first_membership_change_after_to_token.prev_event_id,
+ event_pos=first_membership_change_after_to_token.prev_event_pos,
+ membership=first_membership_change_after_to_token.prev_membership,
+ sender=first_membership_change_after_to_token.prev_sender,
+ room_version_id=room_version_id,
+ )
+ else:
+ # If we can't find the previous membership event, we shouldn't
+ # include the room in the sync response since we can't determine the
+ # exact membership state and shouldn't rely on the current snapshot.
+ changes[room_id] = None
+
+ return changes
+
+ @trace
+ async def get_room_membership_for_user_at_to_token(
+ self,
+ user: UserID,
+ to_token: StreamToken,
+ from_token: Optional[StreamToken],
+ ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]:
+ """
+ Fetch room IDs that the user has had membership in (the full room list including
+ long-lost left rooms that will be filtered, sorted, and sliced).
+
+ We're looking for rooms where the user has had any sort of membership in the
+ token range (> `from_token` and <= `to_token`)
+
+ In order for bans/kicks to not show up, you need to `/forget` those rooms. This
+ doesn't modify the event itself though and only adds the `forgotten` flag to the
+ `room_memberships` table in Synapse. There isn't a way to tell when a room was
+ forgotten at the moment so we can't factor it into the token range.
+
+ Args:
+ user: User to fetch rooms for
+ to_token: The token to fetch rooms up to.
+ from_token: The point in the stream to sync from.
+
+ Returns:
+ A 3-tuple of:
+ - A dictionary of room IDs that the user has had membership in along with
+ membership information in that room at the time of `to_token`.
+ - Set of newly joined rooms
+ - Set of newly left rooms
+ """
+ user_id = user.to_string()
+
+ # First grab a current snapshot of rooms for the user
+ # (also handles forgotten rooms)
+ room_for_user_list = await self.store.get_rooms_for_local_user_where_membership_is(
+ user_id=user_id,
+ # We want to fetch any kind of membership (joined and left rooms) in order
+ # to get the `event_pos` of the latest room membership event for the
+ # user.
+ membership_list=Membership.LIST,
+ excluded_rooms=self.rooms_to_exclude_globally,
+ )
+
+ # We filter out unknown room versions before we try and load any
+ # metadata about the room. They shouldn't go down sync anyway, and their
+ # metadata may be in a broken state.
+ room_for_user_list = [
+ room_for_user
+ for room_for_user in room_for_user_list
+ if room_for_user.room_version_id in KNOWN_ROOM_VERSIONS
+ ]
+
+ # Remove invites from ignored users
+ ignored_users = await self.store.ignored_users(user_id)
+ if ignored_users:
+ room_for_user_list = [
+ room_for_user
+ for room_for_user in room_for_user_list
+ if not (
+ room_for_user.membership == Membership.INVITE
+ and room_for_user.sender in ignored_users
+ )
+ ]
+
+ (
+ newly_joined_room_ids,
+ newly_left_room_map,
+ ) = await self._get_newly_joined_and_left_rooms_fallback(
+ user_id, to_token=to_token, from_token=from_token
+ )
+
+ # If the user has never joined any rooms before, we can just return an empty
+ # list. We also have to check the `newly_left_room_map` in case someone was
+ # state reset out of all of the rooms they were in.
+ if not room_for_user_list and not newly_left_room_map: + return {}, set(), set() + + # Since we fetched the user's room list at some point in time after the + # tokens, we need to revert/rewind some membership changes to match the point in + # time of the `to_token`. + rooms_for_user: Dict[str, RoomsForUserType] = { + room.room_id: room for room in room_for_user_list + } + changes = await self._get_rewind_changes_to_current_membership_to_token( + user, rooms_for_user, to_token + ) + for room_id, change_room_for_user in changes.items(): + if change_room_for_user is None: + rooms_for_user.pop(room_id, None) + else: + rooms_for_user[room_id] = change_room_for_user + + # Ensure we have entries for rooms that the user has been "state reset" + # out of. These are rooms that appear in the `newly_left_rooms` map but + # aren't in the `rooms_for_user` map. + for room_id, newly_left_room_for_user in newly_left_room_map.items(): + # If we already know about the room, it's not a state reset + if room_id in rooms_for_user: + continue + + # This should be true if it's a state reset + assert newly_left_room_for_user.membership is Membership.LEAVE + assert newly_left_room_for_user.event_id is None + assert newly_left_room_for_user.sender is None + + rooms_for_user[room_id] = newly_left_room_for_user + + return rooms_for_user, newly_joined_room_ids, set(newly_left_room_map) + + @trace + async def _get_newly_joined_and_left_rooms( + self, + user_id: str, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: + """Fetch the sets of rooms that the user newly joined or left in the + given token range. + + Note: there may be rooms in the newly left rooms where the user was + "state reset" out of the room, and so that room would not be part of the + "current memberships" of the user. + + Returns: + A 2-tuple of newly joined room IDs and a map of newly_left room + IDs to the `RoomsForUserStateReset` entry. + + We're using `RoomsForUserStateReset` but that doesn't necessarily mean the + user was state reset out of the rooms. It's just that the `event_id`/`sender` + are optional and we can't tell the difference between the server leaving the + room (because the user was the last person participating in the room and + left) and the user being state reset out of the room. To actually check for a + state reset, you need to check if a membership still exists in the room. + """ + + newly_joined_room_ids: Set[str] = set() + newly_left_room_map: Dict[str, RoomsForUserStateReset] = {} + + if not from_token: + return newly_joined_room_ids, newly_left_room_map + + changes = await self.store.get_sliding_sync_membership_changes( + user_id, + from_key=from_token.room_key, + to_key=to_token.room_key, + excluded_room_ids=set(self.rooms_to_exclude_globally), + ) + + for room_id, entry in changes.items(): + if entry.membership == Membership.JOIN: + newly_joined_room_ids.add(room_id) + elif entry.membership == Membership.LEAVE: + newly_left_room_map[room_id] = entry + + return newly_joined_room_ids, newly_left_room_map + + @trace + async def _get_newly_joined_and_left_rooms_fallback( + self, + user_id: str, + to_token: StreamToken, + from_token: Optional[StreamToken], + ) -> Tuple[AbstractSet[str], Mapping[str, RoomsForUserStateReset]]: + """Fetch the sets of rooms that the user newly joined or left in the + given token range.
+ + Note: there may be rooms in the newly left rooms where the user was + "state reset" out of the room, and so that room would not be part of the + "current memberships" of the user. + + Returns: + A 2-tuple of newly joined room IDs and a map of newly_left room + IDs to the `RoomsForUserStateReset` entry. + + We're using `RoomsForUserStateReset` but that doesn't necessarily mean the + user was state reset out of the rooms. It's just that the `event_id`/`sender` + are optional and we can't tell the difference between the server leaving the + room (because the user was the last person participating in the room and + left) and the user being state reset out of the room. To actually check for a + state reset, you need to check if a membership still exists in the room. + """ + newly_joined_room_ids: Set[str] = set() + newly_left_room_map: Dict[str, RoomsForUserStateReset] = {} + + # We need to figure out two things: + # + # - 1) Which rooms are `newly_left` (> `from_token` and <= `to_token`) + # - 2) Which rooms are `newly_joined` (> `from_token` and <= `to_token`) + + # 1) Fetch membership changes that fall in the range from `from_token` up to `to_token` + current_state_delta_membership_changes_in_from_to_range = [] + if from_token: + current_state_delta_membership_changes_in_from_to_range = ( + await self.store.get_current_state_delta_membership_changes_for_user( + user_id, + from_key=from_token.room_key, + to_key=to_token.room_key, + excluded_room_ids=self.rooms_to_exclude_globally, + ) + ) + + # 1) Assemble a map of the last membership event in the token range for each + # room. Someone could have left and joined multiple times during the given + # range but we only care about the end result so we grab the last one. + last_membership_change_by_room_id_in_from_to_range: Dict[ + str, CurrentStateDeltaMembership + ] = {} + # We also want to assemble a map of the first membership event during the token + # range so we can step backward to the previous membership that would apply to + # before the token range to see if we have `newly_joined` the room. + first_membership_change_by_room_id_in_from_to_range: Dict[ + str, CurrentStateDeltaMembership + ] = {} + # Keep track of whether the room has a non-join event in the token range so we + # can later tell if it was a `newly_joined` room. If the last membership event + # in the token range is a join and there is also some non-join in the range, we + # know they `newly_joined`. + has_non_join_event_by_room_id_in_from_to_range: Dict[str, bool] = {} + for ( + membership_change + ) in current_state_delta_membership_changes_in_from_to_range: + room_id = membership_change.room_id + + last_membership_change_by_room_id_in_from_to_range[room_id] = ( + membership_change + ) + # Only set if we haven't already set it + first_membership_change_by_room_id_in_from_to_range.setdefault( + room_id, membership_change + ) + + if membership_change.membership != Membership.JOIN: + has_non_join_event_by_room_id_in_from_to_range[room_id] = True + + # 1) Fix up the `newly_left` rooms in the loop below. + # + # 2) We also want to assemble a list of possibly newly joined rooms. Someone + # could have left and joined multiple times during the given range but we only + # care about whether they are joined at the end of the token range so we are + # working with the last membership event in the token range.
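The `newly_joined` rules just described can be distilled into a small pure function; this is an illustrative sketch (simplified membership strings and field names, not the real storage types):

    from typing import Optional

    def is_newly_joined(
        last_membership: str,
        has_non_join_in_range: bool,
        prev_event_id: Optional[str],
        prev_membership: Optional[str],
    ) -> bool:
        # Must end the token range joined to qualify at all.
        if last_membership != "join":
            return False
        # Left and re-joined within the range.
        if has_non_join_in_range:
            return True
        # First-ever membership falls inside the range.
        if prev_event_id is None:
            return True
        # Was not joined just before the range, joined within it.
        return prev_membership != "join"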
+ possibly_newly_joined_room_ids = set() + for ( + last_membership_change_in_from_to_range + ) in last_membership_change_by_room_id_in_from_to_range.values(): + room_id = last_membership_change_in_from_to_range.room_id + + # 2) + if last_membership_change_in_from_to_range.membership == Membership.JOIN: + possibly_newly_joined_room_ids.add(room_id) + + # 1) Figure out newly_left rooms (> `from_token` and <= `to_token`). + if last_membership_change_in_from_to_range.membership == Membership.LEAVE: + # 1) Mark this room as `newly_left` + newly_left_room_map[room_id] = RoomsForUserStateReset( + room_id=room_id, + sender=last_membership_change_in_from_to_range.sender, + membership=Membership.LEAVE, + event_id=last_membership_change_in_from_to_range.event_id, + event_pos=last_membership_change_in_from_to_range.event_pos, + room_version_id=await self.store.get_room_version_id(room_id), + ) + + # 2) Figure out `newly_joined` + for room_id in possibly_newly_joined_room_ids: + has_non_join_in_from_to_range = ( + has_non_join_event_by_room_id_in_from_to_range.get(room_id, False) + ) + # If the last membership event in the token range is a join and there is + # also some non-join in the range, we know they `newly_joined`. + if has_non_join_in_from_to_range: + # We found a `newly_joined` room (we left and joined within the token range) + newly_joined_room_ids.add(room_id) + else: + prev_event_id = first_membership_change_by_room_id_in_from_to_range[ + room_id + ].prev_event_id + prev_membership = first_membership_change_by_room_id_in_from_to_range[ + room_id + ].prev_membership + + if prev_event_id is None: + # We found a `newly_joined` room (we are joining the room for the + # first time within the token range) + newly_joined_room_ids.add(room_id) + # Last resort, we need to step back to the previous membership event + # just before the token range to see if we're joined then or not. + elif prev_membership != Membership.JOIN: + # We found a `newly_joined` room (we left before the token range + # and joined within the token range) + newly_joined_room_ids.add(room_id) + + return newly_joined_room_ids, newly_left_room_map + + @trace + async def _get_dm_rooms_for_user( + self, + user_id: str, + ) -> AbstractSet[str]: + """Get the set of DM rooms for the user.""" + + # We're using global account data (`m.direct`) instead of checking for + # `is_direct` on membership events because that property only appears for + # the invitee membership event (doesn't show up for the inviter). + # + # We're unable to take `to_token` into account for global account data since + # we only keep track of the latest account data for the user. + dm_map = await self.store.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.DIRECT + ) + + # Flatten out the map. Account data is set by the client so it needs to be + # scrutinized. + dm_room_id_set = set() + if isinstance(dm_map, dict): + for room_ids in dm_map.values(): + # Account data should be a list of room IDs. Ignore anything else + if isinstance(room_ids, list): + for room_id in room_ids: + if isinstance(room_id, str): + dm_room_id_set.add(room_id) + + return dm_room_id_set + + @trace + async def filter_rooms_relevant_for_sync( + self, + user: UserID, + room_membership_for_user_map: Dict[str, RoomsForUserType], + newly_left_room_ids: AbstractSet[str], + ) -> Dict[str, RoomsForUserType]: + """ + Filter room IDs that should/can be listed for this user in the sync response (the + full room list that will be further filtered, sorted, and sliced). 
+ + We're looking for rooms where the user has the following state in the token + range (> `from_token` and <= `to_token`): + + - `invite`, `join`, `knock`, `ban` membership events + - Kicks (`leave` membership events where `sender` is different from the + `user_id`/`state_key`) + - `newly_left` (rooms that were left during the given token range) + - In order for bans/kicks to not show up in sync, you need to `/forget` those + rooms. This doesn't modify the event itself though and only adds the + `forgotten` flag to the `room_memberships` table in Synapse. There isn't a way + to tell when a room was forgotten at the moment so we can't factor it into the + from/to range. + + Args: + user: User that is syncing + room_membership_for_user_map: Room membership for the user + newly_left_room_ids: The set of room IDs we have newly left + + Returns: + A dictionary of room IDs that should be listed in the sync response along + with membership information in that room at the time of `to_token`. + """ + user_id = user.to_string() + + # Filter rooms to only what we're interested in syncing + filtered_sync_room_map = { + room_id: room_membership_for_user + for room_id, room_membership_for_user in room_membership_for_user_map.items() + if filter_membership_for_sync( + user_id=user_id, + room_membership_for_user=room_membership_for_user, + newly_left=room_id in newly_left_room_ids, + ) + } + + return filtered_sync_room_map + + async def check_room_subscription_allowed_for_user( + self, + room_id: str, + room_membership_for_user_map: Dict[str, RoomsForUserType], + to_token: StreamToken, + ) -> Optional[RoomsForUserType]: + """ + Check whether the user is allowed to see the room based on whether they have + ever had membership in the room or if the room is `world_readable`. + + Similar to `check_user_in_room_or_world_readable(...)` + + Args: + room_id: Room to check + room_membership_for_user_map: Room membership for the user at the time of + the `to_token` (<= `to_token`). + to_token: The token to fetch rooms up to. + + Returns: + The room membership for the user if they are allowed to subscribe to the + room, else `None`. + """ + + # We can first check if they are already allowed to see the room based + # on our previous work to assemble the `room_membership_for_user_map`. + # + # If they have had any membership in the room over time (up to the `to_token`), + # let them subscribe and see what they can. + existing_membership_for_user = room_membership_for_user_map.get(room_id) + if existing_membership_for_user is not None: + return existing_membership_for_user + + # TODO: Handle `world_readable` rooms + return None + + # If the room is `world_readable`, it doesn't matter whether they can join, + # everyone can see the room.
+ # not_in_room_membership_for_user = _RoomMembershipForUser( + # room_id=room_id, + # event_id=None, + # event_pos=None, + # membership=None, + # sender=None, + # newly_joined=False, + # newly_left=False, + # is_dm=False, + # ) + # room_state = await self.get_current_state_at( + # room_id=room_id, + # room_membership_for_user_at_to_token=not_in_room_membership_for_user, + # state_filter=StateFilter.from_types( + # [(EventTypes.RoomHistoryVisibility, "")] + # ), + # to_token=to_token, + # ) + + # visibility_event = room_state.get((EventTypes.RoomHistoryVisibility, "")) + # if ( + # visibility_event is not None + # and visibility_event.content.get("history_visibility") + # == HistoryVisibility.WORLD_READABLE + # ): + # return not_in_room_membership_for_user + + # return None + + @trace + async def _bulk_get_stripped_state_for_rooms_from_sync_room_map( + self, + room_ids: StrCollection, + sync_room_map: Dict[str, RoomsForUserType], + ) -> Dict[str, Optional[StateMap[StrippedStateEvent]]]: + """ + Fetch stripped state for a list of room IDs. Stripped state is only + applicable to invite/knock rooms. Other rooms will have `None` as their + stripped state. + + For invite rooms, we pull from `unsigned.invite_room_state`. + For knock rooms, we pull from `unsigned.knock_room_state`. + + Args: + room_ids: Room IDs to fetch stripped state for + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + + Returns: + Mapping from room_id to mapping of (type, state_key) to stripped state + event. + """ + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ] = {} + + # Fetch what we haven't before + room_ids_to_fetch = [ + room_id + for room_id in room_ids + if room_id not in room_id_to_stripped_state_map + ] + + # Gather a list of event IDs we can grab stripped state from + invite_or_knock_event_ids: List[str] = [] + for room_id in room_ids_to_fetch: + if sync_room_map[room_id].membership in ( + Membership.INVITE, + Membership.KNOCK, + ): + event_id = sync_room_map[room_id].event_id + # If this is an invite/knock then there should be an event_id + assert event_id is not None + invite_or_knock_event_ids.append(event_id) + else: + room_id_to_stripped_state_map[room_id] = None + + invite_or_knock_events = await self.store.get_events(invite_or_knock_event_ids) + for invite_or_knock_event in invite_or_knock_events.values(): + room_id = invite_or_knock_event.room_id + membership = invite_or_knock_event.membership + + raw_stripped_state_events = None + if membership == Membership.INVITE: + invite_room_state = invite_or_knock_event.unsigned.get( + "invite_room_state" + ) + raw_stripped_state_events = invite_room_state + elif membership == Membership.KNOCK: + knock_room_state = invite_or_knock_event.unsigned.get( + "knock_room_state" + ) + raw_stripped_state_events = knock_room_state + else: + raise AssertionError( + f"Unexpected membership {membership} (this is a problem with Synapse itself)" + ) + + stripped_state_map: Optional[MutableStateMap[StrippedStateEvent]] = None + # Scrutinize unsigned things. 
`raw_stripped_state_events` should be a list + # of stripped events + if raw_stripped_state_events is not None: + stripped_state_map = {} + if isinstance(raw_stripped_state_events, list): + for raw_stripped_event in raw_stripped_state_events: + stripped_state_event = parse_stripped_state_event( + raw_stripped_event + ) + if stripped_state_event is not None: + stripped_state_map[ + ( + stripped_state_event.type, + stripped_state_event.state_key, + ) + ] = stripped_state_event + + room_id_to_stripped_state_map[room_id] = stripped_state_map + + return room_id_to_stripped_state_map + + @trace + async def _bulk_get_partial_current_state_content_for_rooms( + self, + content_type: Literal[ + # `content.type` from `EventTypes.Create` + "room_type", + # `content.algorithm` from `EventTypes.RoomEncryption` + "room_encryption", + ], + room_ids: Set[str], + sync_room_map: Dict[str, RoomsForUserType], + to_token: StreamToken, + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ], + ) -> Mapping[str, Union[Optional[str], StateSentinel]]: + """ + Get the given state event content for a list of rooms. First we check the + current state of the room, then fall back to stripped state if available, then + historical state. + + Args: + content_type: Which content to grab + room_ids: Room IDs to fetch the given content field for. + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + to_token: We filter based on the state of the room at this token + room_id_to_stripped_state_map: This does not need to be filled in before + calling this function. Mapping from room_id to mapping of (type, state_key) + to stripped state event. Modified in place when we fetch new rooms so we can + save work next time this function is called. + + Returns: + A mapping from room ID to the state event content if the room has + the given state event (event_type, ""), otherwise `None`. Rooms unknown to + this server will return `ROOM_UNKNOWN_SENTINEL`. + """ + room_id_to_content: Dict[str, Union[Optional[str], StateSentinel]] = {} + + # As a bulk shortcut, use the current state if the server is participating in the + # room (meaning we have current state). Ideally, for leave/ban rooms, we would + # want the state at the time of the membership instead of current state to not + # leak anything but we consider the create/encryption stripped state events to + # not be a secret given they are often set at the start of the room and they are + # normally handed out on invite/knock. + # + # Be mindful to only use this for non-sensitive details. For example, even + # though the room name/avatar/topic are also stripped state, their current + # state values seem a lot more sensitive to leak. + # + # Since the store lookups we use are cached, we need to make a mutable copy via + # `dict(...)`.
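The overall lookup described in the docstring above (current state, then stripped state, then historical state) can be sketched as a simple cascade; a schematic illustration with placeholder maps standing in for the actual store calls (hypothetical names, not Synapse APIs):

    from typing import Dict, Iterable, Mapping, Optional

    def resolve_content(
        room_ids: Iterable[str],
        current: Mapping[str, Optional[str]],     # cached bulk current-state lookup
        stripped: Mapping[str, Optional[str]],    # from invite/knock `unsigned` state
        historical: Mapping[str, Optional[str]],  # state at the membership event
    ) -> Dict[str, Optional[str]]:
        resolved: Dict[str, Optional[str]] = {}
        for room_id in room_ids:
            if room_id in current:
                resolved[room_id] = current[room_id]      # 1) current state
            elif room_id in stripped:
                resolved[room_id] = stripped[room_id]     # 2) stripped state
            elif room_id in historical:
                resolved[room_id] = historical[room_id]   # 3) historical state
            # else: room is unknown to this server (ROOM_UNKNOWN_SENTINEL)
        return resolved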
+ event_type = "" + event_content_field = "" + if content_type == "room_type": + event_type = EventTypes.Create + event_content_field = EventContentFields.ROOM_TYPE + room_id_to_content = dict(await self.store.bulk_get_room_type(room_ids)) + elif content_type == "room_encryption": + event_type = EventTypes.RoomEncryption + event_content_field = EventContentFields.ENCRYPTION_ALGORITHM + room_id_to_content = dict( + await self.store.bulk_get_room_encryption(room_ids) + ) + else: + assert_never(content_type) + + room_ids_with_results = [ + room_id + for room_id, content_field in room_id_to_content.items() + if content_field is not ROOM_UNKNOWN_SENTINEL + ] + + # We might not have current room state for remote invite/knocks if we are + # the first person on our server to see the room. The best we can do is look + # in the optional stripped state from the invite/knock event. + room_ids_without_results = room_ids.difference( + chain( + room_ids_with_results, + [ + room_id + for room_id, stripped_state_map in room_id_to_stripped_state_map.items() + if stripped_state_map is not None + ], + ) + ) + room_id_to_stripped_state_map.update( + await self._bulk_get_stripped_state_for_rooms_from_sync_room_map( + room_ids_without_results, sync_room_map + ) + ) + + # Update our `room_id_to_content` map based on the stripped state + # (applies to invite/knock rooms) + room_ids_without_stripped_state: Set[str] = set() + for room_id in room_ids_without_results: + stripped_state_map = room_id_to_stripped_state_map.get( + room_id, Sentinel.UNSET_SENTINEL + ) + assert stripped_state_map is not Sentinel.UNSET_SENTINEL, ( + f"Stripped state left unset for room {room_id}. " + + "Make sure you're calling `_bulk_get_stripped_state_for_rooms_from_sync_room_map(...)` " + + "with that room_id. (this is a problem with Synapse itself)" + ) + + # If there is some stripped state, we assume the remote server passed *all* + # of the potential stripped state events for the room. + if stripped_state_map is not None: + create_stripped_event = stripped_state_map.get((EventTypes.Create, "")) + stripped_event = stripped_state_map.get((event_type, "")) + # Sanity check that we at least have the create event + if create_stripped_event is not None: + if stripped_event is not None: + room_id_to_content[room_id] = stripped_event.content.get( + event_content_field + ) + else: + # Didn't see the state event we're looking for in the stripped + # state so we can assume the relevant content field is `None`. + room_id_to_content[room_id] = None + else: + room_ids_without_stripped_state.add(room_id) + + # Last resort, we might not have current room state for rooms that the + # server has left (no one local is in the room) but we can look at the + # historical state. + # + # Update our `room_id_to_content` map based on the state at the time of + # the membership event. + for room_id in room_ids_without_stripped_state: + # TODO: It would be nice to look this up in a bulk way (N+1 queries) + # + # TODO: `get_state_at(...)` doesn't take into account the "current state".
+ room_state = await self.storage_controllers.state.get_state_at( + room_id=room_id, + stream_position=to_token.copy_and_replace( + StreamKeyType.ROOM, + sync_room_map[room_id].event_pos.to_room_stream_token(), + ), + state_filter=StateFilter.from_types( + [ + (EventTypes.Create, ""), + (event_type, ""), + ] + ), + # Partially-stated rooms should have all state events except for + # remote membership events so we don't need to wait at all because + # we only want the create event and some non-member event. + await_full_state=False, + ) + # We can use the create event as a canary to tell whether the server has + # seen the room before + create_event = room_state.get((EventTypes.Create, "")) + state_event = room_state.get((event_type, "")) + + if create_event is None: + # Skip for unknown rooms + continue + + if state_event is not None: + room_id_to_content[room_id] = state_event.content.get( + event_content_field + ) + else: + # Didn't see the state event we're looking for in the historical + # state, so we can assume the relevant content field is `None`. + room_id_to_content[room_id] = None + + return room_id_to_content + + @trace + async def filter_rooms( + self, + user: UserID, + sync_room_map: Dict[str, RoomsForUserType], + previous_connection_state: PerConnectionState, + filters: SlidingSyncConfig.SlidingSyncList.Filters, + to_token: StreamToken, + dm_room_ids: AbstractSet[str], + ) -> Dict[str, RoomsForUserType]: + """ + Filter rooms based on the sync request. + + Args: + user: User to filter rooms for + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + previous_connection_state: The per-connection state from the previous request + filters: Filters to apply + to_token: We filter based on the state of the room at this token + dm_room_ids: Set of room IDs that are DMs for the user + + Returns: + A filtered dictionary of room IDs along with membership information in the + room at the time of `to_token`.
+ """ + user_id = user.to_string() + + room_id_to_stripped_state_map: Dict[ + str, Optional[StateMap[StrippedStateEvent]] + ] = {} + + filtered_room_id_set = set(sync_room_map.keys()) + + # Filter for Direct-Message (DM) rooms + if filters.is_dm is not None: + with start_active_span("filters.is_dm"): + if filters.is_dm: + # Only DM rooms please + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if room_id in dm_room_ids + } + else: + # Only non-DM rooms please + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + if room_id not in dm_room_ids + } + + if filters.spaces is not None: + with start_active_span("filters.spaces"): + raise NotImplementedError() + + # Filter for encrypted rooms + if filters.is_encrypted is not None: + with start_active_span("filters.is_encrypted"): + room_id_to_encryption = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_encryption", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) + ) + + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + encryption = room_id_to_encryption.get( + room_id, ROOM_UNKNOWN_SENTINEL + ) + + # Just remove rooms if we can't determine their encryption status + if encryption is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue + + # If we're looking for encrypted rooms, filter out rooms that are not + # encrypted and vice versa + is_encrypted = encryption is not None + if (filters.is_encrypted and not is_encrypted) or ( + not filters.is_encrypted and is_encrypted + ): + filtered_room_id_set.remove(room_id) + + # Filter for rooms that the user has been invited to + if filters.is_invite is not None: + with start_active_span("filters.is_invite"): + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_for_user = sync_room_map[room_id] + # If we're looking for invite rooms, filter out rooms that the user is + # not invited to and vice versa + if ( + filters.is_invite + and room_for_user.membership != Membership.INVITE + ) or ( + not filters.is_invite + and room_for_user.membership == Membership.INVITE + ): + filtered_room_id_set.remove(room_id) + + # Filter by room type (space vs room, etc). A room must match one of the types + # provided in the list. `None` is a valid type for rooms which do not have a + # room type. 
+ if filters.room_types is not None or filters.not_room_types is not None: + with start_active_span("filters.room_types"): + room_id_to_type = ( + await self._bulk_get_partial_current_state_content_for_rooms( + content_type="room_type", + room_ids=filtered_room_id_set, + to_token=to_token, + sync_room_map=sync_room_map, + room_id_to_stripped_state_map=room_id_to_stripped_state_map, + ) + ) + + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_type = room_id_to_type.get(room_id, ROOM_UNKNOWN_SENTINEL) + + # Just remove rooms if we can't determine their type + if room_type is ROOM_UNKNOWN_SENTINEL: + filtered_room_id_set.remove(room_id) + continue + + if ( + filters.room_types is not None + and room_type not in filters.room_types + ): + filtered_room_id_set.remove(room_id) + continue + + if ( + filters.not_room_types is not None + and room_type in filters.not_room_types + ): + filtered_room_id_set.remove(room_id) + continue + + if filters.room_name_like is not None: + with start_active_span("filters.room_name_like"): + # TODO: The room name is a bit more sensitive to leak than the + # create/encryption event. Maybe we should consider a better way to fetch + # historical state before implementing this. + # + # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( + # content_type="room_name", + # room_ids=filtered_room_id_set, + # to_token=to_token, + # sync_room_map=sync_room_map, + # room_id_to_stripped_state_map=room_id_to_stripped_state_map, + # ) + raise NotImplementedError() + + # Filter by room tags according to the user's account data + if filters.tags is not None or filters.not_tags is not None: + with start_active_span("filters.tags"): + # Fetch the user tags for their rooms + room_tags = await self.store.get_tags_for_user(user_id) + room_id_to_tag_name_set: Dict[str, Set[str]] = { + room_id: set(tags.keys()) for room_id, tags in room_tags.items() + } + + if filters.tags is not None: + tags_set = set(filters.tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms that don't have one of the tags in the filter + if room_id_to_tag_name_set.get(room_id, set()).intersection( + tags_set + ) + } + + if filters.not_tags is not None: + not_tags_set = set(filters.not_tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms if they have any of the tags in the filter + if not room_id_to_tag_name_set.get(room_id, set()).intersection( + not_tags_set + ) + } + + # Keep rooms that the user has been state reset out of but that we previously + # sent down the connection. We want to make sure that we send these down to + # the client regardless of filters so they find out about the state reset. + # + # We don't always have access to the state in a room after being state reset if + # no one else locally on the server is participating in the room so we patch + # these back in manually.
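The set comprehension that follows implements the state-reset patch-back rule just described; as a standalone predicate it might look like this (the `HaveSentRoomFlag` status is simplified to a string, purely for illustration):

    from typing import Optional

    def should_patch_back(event_id: Optional[str], have_sent_status: str) -> bool:
        # A state reset leaves us without a membership event for the room
        # (event_id is None). If we previously sent the room down this
        # connection, keep it so the client learns about the reset.
        return event_id is None and have_sent_status != "NEVER"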
+ state_reset_out_of_room_id_set = { + room_id + for room_id in sync_room_map.keys() + if sync_room_map[room_id].event_id is None + and previous_connection_state.rooms.have_sent_room(room_id).status + != HaveSentRoomFlag.NEVER + } + + # Assemble a new sync room map but only with the `filtered_room_id_set` + return { + room_id: sync_room_map[room_id] + for room_id in filtered_room_id_set | state_reset_out_of_room_id_set + } + + @trace + async def filter_rooms_using_tables( + self, + user_id: str, + sync_room_map: Mapping[str, RoomsForUserSlidingSync], + previous_connection_state: PerConnectionState, + filters: SlidingSyncConfig.SlidingSyncList.Filters, + to_token: StreamToken, + dm_room_ids: AbstractSet[str], + ) -> Dict[str, RoomsForUserSlidingSync]: + """ + Filter rooms based on the sync request. + + Args: + user_id: ID of the user to filter rooms for + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + previous_connection_state: The per-connection state from the previous request + filters: Filters to apply + to_token: We filter based on the state of the room at this token + dm_room_ids: Set of room IDs which are DMs + + Returns: + A filtered dictionary of room IDs along with membership information in the + room at the time of `to_token`. + """ + + filtered_room_id_set = set(sync_room_map.keys()) + + # Filter for Direct-Message (DM) rooms + if filters.is_dm is not None: + with start_active_span("filters.is_dm"): + if filters.is_dm: + # Intersect with the DM room set + filtered_room_id_set &= dm_room_ids + else: + # Remove DMs + filtered_room_id_set -= dm_room_ids + + if filters.spaces is not None: + with start_active_span("filters.spaces"): + raise NotImplementedError() + + # Filter for encrypted rooms + if filters.is_encrypted is not None: + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms if we can't figure out what the encryption status is + if sync_room_map[room_id].has_known_state + # Or remove if it doesn't match the filter + and sync_room_map[room_id].is_encrypted == filters.is_encrypted + } + + # Filter for rooms that the user has been invited to + if filters.is_invite is not None: + with start_active_span("filters.is_invite"): + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + room_for_user = sync_room_map[room_id] + # If we're looking for invite rooms, filter out rooms that the user is + # not invited to and vice versa + if ( + filters.is_invite + and room_for_user.membership != Membership.INVITE + ) or ( + not filters.is_invite + and room_for_user.membership == Membership.INVITE + ): + filtered_room_id_set.remove(room_id) + + # Filter by room type (space vs room, etc). A room must match one of the types + # provided in the list. `None` is a valid type for rooms which do not have a + # room type.
+ if filters.room_types is not None or filters.not_room_types is not None: + with start_active_span("filters.room_types"): + # Make a copy so we don't run into an error: `Set changed size during + # iteration`, when we filter out and remove items + for room_id in filtered_room_id_set.copy(): + # Remove rooms if we can't figure out what room type it is + if not sync_room_map[room_id].has_known_state: + filtered_room_id_set.remove(room_id) + continue + + room_type = sync_room_map[room_id].room_type + + if ( + filters.room_types is not None + and room_type not in filters.room_types + ): + filtered_room_id_set.remove(room_id) + continue + + if ( + filters.not_room_types is not None + and room_type in filters.not_room_types + ): + filtered_room_id_set.remove(room_id) + continue + + if filters.room_name_like is not None: + with start_active_span("filters.room_name_like"): + # TODO: The room name is a bit more sensitive to leak than the + # create/encryption event. Maybe we should consider a better way to fetch + # historical state before implementing this. + # + # room_id_to_create_content = await self._bulk_get_partial_current_state_content_for_rooms( + # content_type="room_name", + # room_ids=filtered_room_id_set, + # to_token=to_token, + # sync_room_map=sync_room_map, + # room_id_to_stripped_state_map=room_id_to_stripped_state_map, + # ) + raise NotImplementedError() + + # Filter by room tags according to the user's account data + if filters.tags is not None or filters.not_tags is not None: + with start_active_span("filters.tags"): + # Fetch the user tags for their rooms + room_tags = await self.store.get_tags_for_user(user_id) + room_id_to_tag_name_set: Dict[str, Set[str]] = { + room_id: set(tags.keys()) for room_id, tags in room_tags.items() + } + + if filters.tags is not None: + tags_set = set(filters.tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms that don't have one of the tags in the filter + if room_id_to_tag_name_set.get(room_id, set()).intersection( + tags_set + ) + } + + if filters.not_tags is not None: + not_tags_set = set(filters.not_tags) + filtered_room_id_set = { + room_id + for room_id in filtered_room_id_set + # Remove rooms if they have any of the tags in the filter + if not room_id_to_tag_name_set.get(room_id, set()).intersection( + not_tags_set + ) + } + + # Keep rooms that the user has been state reset out of but that we previously + # sent down the connection. We want to make sure that we send these down to + # the client regardless of filters so they find out about the state reset. + # + # We don't always have access to the state in a room after being state reset if + # no one else locally on the server is participating in the room so we patch + # these back in manually. + state_reset_out_of_room_id_set = { + room_id + for room_id in sync_room_map.keys() + if sync_room_map[room_id].event_id is None + and previous_connection_state.rooms.have_sent_room(room_id).status + != HaveSentRoomFlag.NEVER + } + + # Assemble a new sync room map but only with the `filtered_room_id_set` + return { + room_id: sync_room_map[room_id] + for room_id in filtered_room_id_set | state_reset_out_of_room_id_set + } + + @trace + async def sort_rooms( + self, + sync_room_map: Dict[str, RoomsForUserType], + to_token: StreamToken, + limit: Optional[int] = None, + ) -> List[RoomsForUserType]: + """ + Sort by `stream_ordering` of the last event that the user should see in the + room. `stream_ordering` is unique so we get a stable sort.
+ + If `limit` is specified then sort may return fewer entries, but will + always return at least the top N rooms. This is useful as we don't always + need to sort the full list, but are just interested in the top N. + + Args: + sync_room_map: Dictionary of room IDs to sort along with membership + information in the room at the time of `to_token`. + to_token: We sort based on the events in the room at this token (<= `to_token`) + limit: The number of rooms that we need to return from the top of the list. + + Returns: + A sorted list of room IDs by `stream_ordering` along with membership information. + """ + + # Assemble a map of room ID to the `stream_ordering` of the last activity that the + # user should see in the room (<= `to_token`) + last_activity_in_room_map: Dict[str, int] = {} + + # Same as above, except for positions that we know are in the event + # stream cache. + cached_positions: Dict[str, int] = {} + + earliest_cache_position = ( + self.store._events_stream_cache.get_earliest_known_position() + ) + + for room_id, room_for_user in sync_room_map.items(): + if room_for_user.membership == Membership.JOIN: + # For joined rooms check the stream change cache. + cached_position = ( + self.store._events_stream_cache.get_max_pos_of_last_change(room_id) + ) + if cached_position is not None: + cached_positions[room_id] = cached_position + else: + # If the user has left/been invited/knocked/been banned from a + # room, they shouldn't see anything past that point. + # + # FIXME: It's possible that people should see beyond this point + # in invited/knocked cases if for example the room has + # `invite`/`world_readable` history visibility, see + # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1653045932 + last_activity_in_room_map[room_id] = room_for_user.event_pos.stream + + # If the stream position is in range of the stream change cache + # we can include it. + if room_for_user.event_pos.stream > earliest_cache_position: + cached_positions[room_id] = room_for_user.event_pos.stream + + # If we are only asked for the top N rooms, and we have enough from + # looking in the stream change cache, then we can return early. This + # is because the cache must include all entries above + # `.get_earliest_known_position()`. + if limit is not None and len(cached_positions) >= limit: + # ... but first we need to handle the case where the cached max + # position is greater than the to_token, in which case we do + # actually query the DB. This should happen rarely, so can do it in + # a loop. + for room_id, position in list(cached_positions.items()): + if position > to_token.room_key.stream: + result = await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, to_token.room_key + ) + if ( + result is not None + and result[1].stream > earliest_cache_position + ): + # We have a stream position in the cached range. + cached_positions[room_id] = result[1].stream + else: + # No position in the range, so we remove the entry. + cached_positions.pop(room_id) + + if limit is not None and len(cached_positions) >= limit: + return sorted( + ( + room + for room in sync_room_map.values() + if room.room_id in cached_positions + ), + # Sort by the last activity (stream_ordering) in the room + key=lambda room_info: cached_positions[room_info.room_id], + # We want descending order + reverse=True, + ) + + # For fully-joined rooms, we find the latest activity at/before the + # `to_token`. 
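The top-N shortcut above relies on the events stream change cache being complete for positions above its earliest known position; distilled into a hypothetical standalone check (not the actual Synapse code):

    from typing import Dict, Optional

    def have_enough_cached(
        cached_positions: Dict[str, int],  # room_id -> stream_ordering
        limit: Optional[int],
    ) -> bool:
        # The stream change cache knows about every room with activity above
        # its earliest known position, so once at least `limit` rooms have
        # cached positions, the true top-N must be among them and the bulk
        # DB lookup can be skipped.
        return limit is not None and len(cached_positions) >= limit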
+ joined_room_positions = ( + await self.store.bulk_get_last_event_pos_in_room_before_stream_ordering( + [ + room_id + for room_id, room_for_user in sync_room_map.items() + if room_for_user.membership == Membership.JOIN + ], + to_token.room_key, + ) + ) + + last_activity_in_room_map.update(joined_room_positions) + + return sorted( + sync_room_map.values(), + # Sort by the last activity (stream_ordering) in the room + key=lambda room_info: last_activity_in_room_map[room_info.room_id], + # We want descending order + reverse=True, + ) + + async def get_is_encrypted_for_room_at_token( + self, room_id: str, to_token: RoomStreamToken + ) -> bool: + """Return whether the room was encrypted at the time of the given token.""" + + # Fetch the current encryption state + state_ids = await self.store.get_partial_filtered_current_state_ids( + room_id, StateFilter.from_types([(EventTypes.RoomEncryption, "")]) + ) + encryption_event_id = state_ids.get((EventTypes.RoomEncryption, "")) + + # Now roll back the state by looking at the state deltas between + # to_token and now. + deltas = await self.store.get_current_state_deltas_for_room( + room_id, + from_token=to_token, + to_token=self.store.get_room_max_token(), + ) + + for delta in deltas: + if delta.event_type != EventTypes.RoomEncryption: + continue + + # Found the first change; we look at the previous event ID to get + # the state at the `to_token`. + + if delta.prev_event_id is None: + # There is no prev event, so no encryption state event, so the room is not encrypted + return False + + encryption_event_id = delta.prev_event_id + break + + # We didn't find an encryption state event, so the room isn't encrypted + if encryption_event_id is None: + return False + + # We found encryption state, so check if the content has a non-null algorithm + encrypted_event = await self.store.get_event(encryption_event_id) + algorithm = encrypted_event.content.get(EventContentFields.ENCRYPTION_ALGORITHM) + + return algorithm is not None diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py new file mode 100644
index 0000000000..d24fccf76f --- /dev/null +++ b/synapse/handlers/sliding_sync/store.py
@@ -0,0 +1,128 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import logging +from typing import TYPE_CHECKING, Optional + +import attr + +from synapse.logging.opentracing import trace +from synapse.storage.databases.main import DataStore +from synapse.types import SlidingSyncStreamToken +from synapse.types.handlers.sliding_sync import ( + MutablePerConnectionState, + PerConnectionState, + SlidingSyncConfig, +) + +if TYPE_CHECKING: + pass + +logger = logging.getLogger(__name__) + + +@attr.s(auto_attribs=True) +class SlidingSyncConnectionStore: + """Store of per-connection state, including what rooms we have + previously sent down a sliding sync connection. + + Note: This is NOT safe to run in a worker setup because connection positions will + point to different sets of rooms on different workers. e.g. for the same connection, + a connection position of 5 might have totally different states on worker A and + worker B. + + One complication that we need to deal with here is needing to handle requests being + resent, i.e. if we sent down a room in a response that the client received, we must + consider the room *not* sent when we get the request again. + + This is handled by using an integer "token", which is returned to the client + as part of the sync token. For each connection we store a mapping from + tokens to the room states, and create a new entry when we send down new + rooms. + + Note that for any given sliding sync connection we will only store a maximum + of two different tokens: the previous token from the request and a new token + sent in the response. When we receive a request with a given token, we then + clear out all other entries with a different token. + + Attributes: + store: The datastore used to persist the per-connection state. + """ + + store: "DataStore" + + async def get_and_clear_connection_positions( + self, + sync_config: SlidingSyncConfig, + from_token: Optional[SlidingSyncStreamToken], + ) -> PerConnectionState: + """Fetch the per-connection state for the token. + + Raises: + SlidingSyncUnknownPosition if the connection_token is unknown + """ + # If this is our first request, there is no previous connection state to fetch out of the database + if from_token is None or from_token.connection_position == 0: + return PerConnectionState() + + conn_id = sync_config.conn_id or "" + + device_id = sync_config.requester.device_id + assert device_id is not None + + return await self.store.get_and_clear_connection_positions( + sync_config.user.to_string(), + device_id, + conn_id, + from_token.connection_position, + ) + + @trace + async def record_new_state( + self, + sync_config: SlidingSyncConfig, + from_token: Optional[SlidingSyncStreamToken], + new_connection_state: MutablePerConnectionState, + ) -> int: + """Record updated per-connection state, returning the connection + position associated with the new state. + If there are no changes to the state this may return the same token as + the existing per-connection state.
+ """ + if not new_connection_state.has_updates(): + if from_token is not None: + return from_token.connection_position + else: + return 0 + + # A from token with a zero connection position means there was no + # previously stored connection state, so we treat a zero the same as + # there being no previous position. + previous_connection_position = None + if from_token is not None and from_token.connection_position != 0: + previous_connection_position = from_token.connection_position + + conn_id = sync_config.conn_id or "" + + device_id = sync_config.requester.device_id + assert device_id is not None + + return await self.store.persist_per_connection_state( + sync_config.user.to_string(), + device_id, + conn_id, + previous_connection_position, + new_connection_state, + ) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index ee74289b6c..2795b282e5 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py
@@ -33,17 +33,17 @@ from typing import ( Mapping, NoReturn, Optional, + Protocol, Set, ) from urllib.parse import urlencode import attr -from typing_extensions import Protocol from twisted.web.iweb import IRequest from twisted.web.server import Request -from synapse.api.constants import LoginType +from synapse.api.constants import LoginType, ProfileFields from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError from synapse.config.sso import SsoAttributeRequirement from synapse.handlers.device import DeviceHandler @@ -81,8 +81,7 @@ class SsoIdentityProvider(Protocol): An Identity Provider, or IdP, is an external HTTP service which authenticates a user to say whether they should be allowed to log in, or perform a given action. - Synapse supports various implementations of IdPs, including OpenID Connect, SAML, - and CAS. + Synapse supports various implementations of IdPs, including OpenID Connect. The main entry point is `handle_redirect_request`, which should return a URI to redirect the user's browser to the IdP's authentication page. @@ -97,7 +96,7 @@ class SsoIdentityProvider(Protocol): def idp_id(self) -> str: """A unique identifier for this SSO provider - Eg, "saml", "cas", "github" + Eg. "github" """ @property @@ -157,7 +156,7 @@ class UserAttributes: class UsernameMappingSession: """Data we track about SSO sessions""" - # A unique identifier for this SSO provider, e.g. "oidc" or "saml". + # A unique identifier for this SSO provider, e.g. "oidc". auth_provider_id: str # An optional session ID from the IdP. @@ -351,7 +350,7 @@ class SsoHandler: Args: auth_provider_id: A unique identifier for this SSO provider, e.g. - "oidc" or "saml". + "oidc". remote_user_id: The user ID according to the remote IdP. This might be an e-mail address, a GUID, or some other form. It must be unique and immutable. @@ -418,7 +417,7 @@ class SsoHandler: Args: auth_provider_id: A unique identifier for this SSO provider, e.g. - "oidc" or "saml". + "oidc". remote_user_id: The unique identifier from the SSO provider. @@ -634,7 +633,7 @@ class SsoHandler: Args: auth_provider_id: A unique identifier for this SSO provider, e.g. - "oidc" or "saml". + "oidc". remote_user_id: The unique identifier from the SSO provider. @@ -704,7 +703,7 @@ class SsoHandler: including a non-empty localpart. auth_provider_id: A unique identifier for this SSO provider, e.g. - "oidc" or "saml". + "oidc". remote_user_id: The unique identifier from the SSO provider. @@ -813,9 +812,10 @@ class SsoHandler: # bail if user already has the same avatar profile = await self._profile_handler.get_profile(user_id) - if profile["avatar_url"] is not None: - server_name = profile["avatar_url"].split("/")[-2] - media_id = profile["avatar_url"].split("/")[-1] + if ProfileFields.AVATAR_URL in profile: + avatar_url_parts = profile[ProfileFields.AVATAR_URL].split("/") + server_name = avatar_url_parts[-2] + media_id = avatar_url_parts[-1] if self._is_mine_server_name(server_name): media = await self._media_repo.store.get_local_media(media_id) # type: ignore[has-type] if media is not None and upload_name == media.upload_name: @@ -855,12 +855,12 @@ class SsoHandler: Given an SSO ID, retrieve the user ID for it and complete UIA. Note that this requires that the user is mapped in the "user_external_ids" - table. This will be the case if they have ever logged in via SAML or OIDC in + table. This will be the case if they have ever logged in via OIDC in recentish synapse versions, but may not be for older users. 
Args: auth_provider_id: A unique identifier for this SSO provider, e.g. - "oidc" or "saml". + "oidc". remote_user_id: The unique identifier from the SSO provider. ui_auth_session_id: The ID of the user-interactive auth session. request: The request to complete. @@ -1184,16 +1184,16 @@ class SsoHandler: Args: auth_provider_id: A unique identifier for this SSO provider, e.g. - "oidc" or "saml". + "oidc". auth_provider_session_id: The session ID from the provider to logout expected_user_id: The user we're expecting to logout. If set, it will ignore sessions belonging to other users and log an error. """ # It is expected that this is the main process. - assert isinstance( - self._device_handler, DeviceHandler - ), "revoking SSO sessions can only be called on the main process" + assert isinstance(self._device_handler, DeviceHandler), ( + "revoking SSO sessions can only be called on the main process" + ) # Invalidate any running user-mapping sessions to_delete = [] @@ -1276,12 +1276,16 @@ def _check_attribute_requirement( return False # If the requirement is None, the attribute existing is enough. - if req.value is None: + if req.value is None and req.one_of is None: return True values = attributes[req.attribute] if req.value in values: return True + if req.one_of: + for value in req.one_of: + if value in values: + return True logger.info( "SSO attribute %s did not match required value '%s' (was '%s')", diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 6af2eeb75f..c6f2c38d8d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py
@@ -66,6 +66,7 @@ from synapse.logging.opentracing import ( from synapse.storage.databases.main.event_push_actions import RoomNotifCounts from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.databases.main.stream import PaginateFunction +from synapse.storage.invite_rule import InviteRule from synapse.storage.roommember import MemberSummary from synapse.types import ( DeviceListUpdates, @@ -86,7 +87,7 @@ from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.lrucache import LruCache from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext -from synapse.util.metrics import Measure, measure_func +from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -143,6 +144,7 @@ class SyncConfig: filter_collection: FilterCollection is_guest: bool device_id: Optional[str] + use_state_after: bool @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -183,10 +185,7 @@ class JoinedSyncResult: to tell if room needs to be part of the sync result. """ return bool( - self.timeline - or self.state - or self.ephemeral - or self.account_data + self.timeline or self.state or self.ephemeral or self.account_data # nb the notification count does not, er, count: if there's nothing # else in the result, we don't need to send it. ) @@ -575,10 +574,10 @@ class SyncHandler: if timeout == 0 or since_token is None or full_state: # we are going to return immediately, so don't bother calling # notifier.wait_for_events. - result: Union[SyncResult, E2eeSyncResult] = ( - await self.current_sync_for_user( - sync_config, sync_version, since_token, full_state=full_state - ) + result: Union[ + SyncResult, E2eeSyncResult + ] = await self.current_sync_for_user( + sync_config, sync_version, since_token, full_state=full_state ) else: # Otherwise, we wait for something to happen and report it to the user. @@ -673,10 +672,10 @@ class SyncHandler: # Go through the `/sync` v2 path if sync_version == SyncVersion.SYNC_V2: - sync_result: Union[SyncResult, E2eeSyncResult] = ( - await self.generate_sync_result( - sync_config, since_token, full_state - ) + sync_result: Union[ + SyncResult, E2eeSyncResult + ] = await self.generate_sync_result( + sync_config, since_token, full_state ) # Go through the MSC3575 Sliding Sync `/sync/e2ee` path elif sync_version == SyncVersion.E2EE_SYNC: @@ -909,7 +908,7 @@ class SyncHandler: # Use `stream_ordering` for updates else paginate_room_events_by_stream_ordering ) - events, end_key = await pagination_method( + events, end_key, limited = await pagination_method( room_id=room_id, # The bounds are reversed so we can paginate backwards # (from newer to older events) starting at to_bound. @@ -917,9 +916,7 @@ class SyncHandler: from_key=end_key, to_key=since_key, direction=Direction.BACKWARDS, - # We add one so we can determine if there are enough events to saturate - # the limit or not (see `limited`) - limit=load_limit + 1, + limit=load_limit, ) # We want to return the events in ascending order (the last event is the # most recent). 
@@ -974,9 +971,6 @@ class SyncHandler: loaded_recents.extend(recents) recents = loaded_recents - if len(events) <= load_limit: - limited = False - break max_repeat -= 1 if len(recents) > timeline_limit: @@ -1149,6 +1143,7 @@ class SyncHandler: since_token: Optional[StreamToken], end_token: StreamToken, full_state: bool, + joined: bool, ) -> MutableStateMap[EventBase]: """Works out the difference in state between the end of the previous sync and the start of the timeline. @@ -1163,6 +1158,7 @@ class SyncHandler: the point just after their leave event. full_state: Whether to force returning the full state. `lazy_load_members` still applies when `full_state` is `True`. + joined: whether the user is currently joined to the room Returns: The state to return in the sync response for the room. @@ -1238,11 +1234,12 @@ class SyncHandler: if full_state: state_ids = await self._compute_state_delta_for_full_sync( room_id, - sync_config.user, + sync_config, batch, end_token, members_to_fetch, timeline_state, + joined, ) else: # If this is an initial sync then full_state should be set, and @@ -1252,6 +1249,7 @@ class SyncHandler: state_ids = await self._compute_state_delta_for_incremental_sync( room_id, + sync_config, batch, since_token, end_token, @@ -1324,20 +1322,24 @@ class SyncHandler: async def _compute_state_delta_for_full_sync( self, room_id: str, - syncing_user: UserID, + sync_config: SyncConfig, batch: TimelineBatch, end_token: StreamToken, members_to_fetch: Optional[Set[str]], timeline_state: StateMap[str], + joined: bool, ) -> StateMap[str]: """Calculate the state events to be included in a full sync response. As with `_compute_state_delta_for_incremental_sync`, the result will include the membership events for the senders of each event in `members_to_fetch`. + Note that whether this returns the state at the start or the end of the + batch depends on `sync_config.use_state_after` (c.f. MSC4222). + Args: room_id: The room we are calculating for. - syncing_user: The user that is calling `/sync`. + sync_config: The sync configuration, including the user that is calling `/sync`. batch: The timeline batch for the room that will be sent to the user. end_token: Token of the end of the current batch. Normally this will be the same as the global "now_token", but if the user has left the room, @@ -1346,10 +1348,11 @@ class SyncHandler: events in the timeline. timeline_state: The contribution to the room state from state events in `batch`. Only contains the last event for any given state key. + joined: whether the user is currently joined to the room Returns: A map from (type, state_key) to event_id, for each event that we believe - should be included in the `state` part of the sync response. + should be included in the `state` or `state_after` part of the sync response. """ if members_to_fetch is not None: # Lazy-loading of membership events is enabled. @@ -1367,7 +1370,7 @@ class SyncHandler: # is no guarantee that our membership will be in the auth events of # timeline events when the room is partial stated. state_filter = StateFilter.from_lazy_load_member_list( - members_to_fetch.union((syncing_user.to_string(),)) + members_to_fetch.union((sync_config.user.to_string(),)) ) # We are happy to use partial state to compute the `/sync` response. @@ -1381,6 +1384,61 @@ class SyncHandler: await_full_state = True lazy_load_members = False + # Check whether we want to return the state at the start or the end of the + # timeline. If at the end, we can just use the current state.
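The roll-back in the branch that follows works by replaying state deltas in reverse so that the earliest delta for each (type, state_key) wins, restoring the state as it was at `end_token`. A self-contained sketch, using simplified delta tuples instead of the real delta objects:

    from typing import Dict, List, Optional, Tuple

    StateKey = Tuple[str, str]  # (event_type, state_key)

    def rollback_state(
        current_state: Dict[StateKey, str],
        deltas: List[Tuple[str, str, Optional[str]]],  # (type, key, prev_event_id)
    ) -> Dict[StateKey, str]:
        state = dict(current_state)
        # Deltas arrive in stream order (oldest first); iterating backwards
        # means the *earliest* delta for a given key is applied last, which
        # recovers the state at the earlier token.
        for event_type, state_key, prev_event_id in reversed(deltas):
            if prev_event_id is not None:
                state[(event_type, state_key)] = prev_event_id
            else:
                # The entry did not exist at the earlier token.
                state.pop((event_type, state_key), None)
        return state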
+ if sync_config.use_state_after: + # If we're getting the state at the end of the timeline, we can just + # use the current state of the room (and roll back any changes + # between when we fetched the current state and `end_token`). + # + # For rooms we're not joined to, there might be a very large number + # of deltas between `end_token` and "now", and so instead we fetch + # the state at the end of the timeline. + if joined: + state_ids = await self._state_storage_controller.get_current_state_ids( + room_id, + state_filter=state_filter, + await_full_state=await_full_state, + ) + + # Now roll back the state by looking at the state deltas between + # end_token and now. + deltas = await self.store.get_current_state_deltas_for_room( + room_id, + from_token=end_token.room_key, + to_token=self.store.get_room_max_token(), + ) + if deltas: + mutable_state_ids = dict(state_ids) + + # We iterate over the deltas backwards so that if there are + # multiple changes of the same type/state_key we'll + # correctly pick the earliest delta. + for delta in reversed(deltas): + if delta.prev_event_id: + mutable_state_ids[(delta.event_type, delta.state_key)] = ( + delta.prev_event_id + ) + elif (delta.event_type, delta.state_key) in mutable_state_ids: + mutable_state_ids.pop((delta.event_type, delta.state_key)) + + state_ids = mutable_state_ids + + return state_ids + + else: + # Just use state groups to get the state at the end of the + # timeline, i.e. the state at the leave/etc event. + state_at_timeline_end = ( + await self._state_storage_controller.get_state_ids_at( + room_id, + stream_position=end_token, + state_filter=state_filter, + await_full_state=await_full_state, + ) + ) + return state_at_timeline_end + state_at_timeline_end = await self._state_storage_controller.get_state_ids_at( room_id, stream_position=end_token, @@ -1413,6 +1471,7 @@ class SyncHandler: async def _compute_state_delta_for_incremental_sync( self, room_id: str, + sync_config: SyncConfig, batch: TimelineBatch, since_token: StreamToken, end_token: StreamToken, @@ -1427,8 +1486,12 @@ class SyncHandler: (`compute_state_delta`) is responsible for keeping track of which membership events we have already sent to the client, and hence ripping them out. + Note that whether this returns the state at the start or the end of the + batch depends on `sync_config.use_state_after` (c.f. MSC4222). + Args: room_id: The room we are calculating for. + sync_config batch: The timeline batch for the room that will be sent to the user. since_token: Token of the end of the previous batch. end_token: Token of the end of the current batch. Normally this will be @@ -1441,7 +1504,7 @@ class SyncHandler: Returns: A map from (type, state_key) to event_id, for each event that we believe - should be included in the `state` part of the sync response. + should be included in the `state` or `state_after` part of the sync response. """ if members_to_fetch is not None: # Lazy-loading is enabled. Only return the state that is needed. @@ -1453,6 +1516,51 @@ class SyncHandler: await_full_state = True lazy_load_members = False + # Check if we are wanting to return the state at the start or end of the + # timeline. If at the end we can just use the current state delta stream. + if sync_config.use_state_after: + delta_state_ids: MutableStateMap[str] = {} + + if members_to_fetch: + # We're lazy-loading, so the client might need some more member + # events to understand the events in this timeline. 
So we always + # fish out all the member events corresponding to the timeline + # here. The caller will then dedupe any redundant ones. + member_ids = await self._state_storage_controller.get_current_state_ids( + room_id=room_id, + state_filter=StateFilter.from_types( + (EventTypes.Member, member) for member in members_to_fetch + ), + await_full_state=await_full_state, + ) + delta_state_ids.update(member_ids) + + # We don't do LL filtering for incremental syncs - see + # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 + # N.B. this slows down incr syncs as we are now processing way more + # state in the server than if we were LLing. + # + # i.e. we return all state deltas, including membership changes that + # we'd normally exclude due to LL. + deltas = await self.store.get_current_state_deltas_for_room( + room_id=room_id, + from_token=since_token.room_key, + to_token=end_token.room_key, + ) + for delta in deltas: + if delta.event_id is None: + # There was a state reset and this state entry is no longer + # present, but we have no way of informing the client about + # this, so we just skip it for now. + continue + + # Note that deltas are in stream ordering, so if there are + # multiple deltas for a given type/state_key we'll always pick + # the latest one. + delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id + + return delta_state_ids + # For a non-gappy sync if the events in the timeline are simply a linear # chain (i.e. no merging/branching of the graph), then we know the state # delta between the end of the previous sync and start of the new one is @@ -1488,13 +1596,16 @@ class SyncHandler: # timeline here. The caller will then dedupe any redundant # ones. - state_ids = await self._state_storage_controller.get_state_ids_for_event( - batch.events[0].event_id, - # we only want members! - state_filter=StateFilter.from_types( - (EventTypes.Member, member) for member in members_to_fetch - ), - await_full_state=False, + state_ids = ( + await self._state_storage_controller.get_state_ids_for_event( + batch.events[0].event_id, + # we only want members! + state_filter=StateFilter.from_types( + (EventTypes.Member, member) + for member in members_to_fetch + ), + await_full_state=False, + ) ) return state_ids @@ -1779,8 +1890,15 @@ class SyncHandler: ) if include_device_list_updates: - device_lists = await self._generate_sync_entry_for_device_list( - sync_result_builder, + # include_device_list_updates can only be True if we have a + # since token. + assert since_token is not None + + device_lists = await self._device_handler.generate_sync_entry_for_device_list( + user_id=user_id, + since_token=since_token, + now_token=sync_result_builder.now_token, + joined_room_ids=sync_result_builder.joined_room_ids, newly_joined_rooms=newly_joined_rooms, newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, newly_left_rooms=newly_left_rooms, @@ -1892,8 +2010,14 @@ class SyncHandler: newly_left_users, ) = sync_result_builder.calculate_user_changes() - device_lists = await self._generate_sync_entry_for_device_list( - sync_result_builder, + # include_device_list_updates can only be True if we have a + # since token. 
+ assert since_token is not None + device_lists = await self._device_handler.generate_sync_entry_for_device_list( + user_id=user_id, + since_token=since_token, + now_token=sync_result_builder.now_token, + joined_room_ids=sync_result_builder.joined_room_ids, newly_joined_rooms=newly_joined_rooms, newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, newly_left_rooms=newly_left_rooms, @@ -2070,94 +2194,6 @@ class SyncHandler: return sync_result_builder - @measure_func("_generate_sync_entry_for_device_list") - async def _generate_sync_entry_for_device_list( - self, - sync_result_builder: "SyncResultBuilder", - newly_joined_rooms: AbstractSet[str], - newly_joined_or_invited_or_knocked_users: AbstractSet[str], - newly_left_rooms: AbstractSet[str], - newly_left_users: AbstractSet[str], - ) -> DeviceListUpdates: - """Generate the DeviceListUpdates section of sync - - Args: - sync_result_builder - newly_joined_rooms: Set of rooms user has joined since previous sync - newly_joined_or_invited_or_knocked_users: Set of users that have joined, - been invited to a room or are knocking on a room since - previous sync. - newly_left_rooms: Set of rooms user has left since previous sync - newly_left_users: Set of users that have left a room we're in since - previous sync - """ - - user_id = sync_result_builder.sync_config.user.to_string() - since_token = sync_result_builder.since_token - assert since_token is not None - - # Take a copy since these fields will be mutated later. - newly_joined_or_invited_or_knocked_users = set( - newly_joined_or_invited_or_knocked_users - ) - newly_left_users = set(newly_left_users) - - # We want to figure out what user IDs the client should refetch - # device keys for, and which users we aren't going to track changes - # for anymore. - # - # For the first step we check: - # a. if any users we share a room with have updated their devices, - # and - # b. we also check if we've joined any new rooms, or if a user has - # joined a room we're in. - # - # For the second step we just find any users we no longer share a - # room with by looking at all users that have left a room plus users - # that were in a room we've left. - - users_that_have_changed = set() - - joined_room_ids = sync_result_builder.joined_room_ids - - # Step 1a, check for changes in devices of users we share a room - # with - users_that_have_changed = ( - await self._device_handler.get_device_changes_in_shared_rooms( - user_id, - joined_room_ids, - from_token=since_token, - now_token=sync_result_builder.now_token, - ) - ) - - # Step 1b, check for newly joined rooms - for room_id in newly_joined_rooms: - joined_users = await self.store.get_users_in_room(room_id) - newly_joined_or_invited_or_knocked_users.update(joined_users) - - # TODO: Check that these users are actually new, i.e. either they - # weren't in the previous sync *or* they left and rejoined. - users_that_have_changed.update(newly_joined_or_invited_or_knocked_users) - - user_signatures_changed = await self.store.get_users_whose_signatures_changed( - user_id, since_token.device_list_key - ) - users_that_have_changed.update(user_signatures_changed) - - # Now find users that we no longer track - for room_id in newly_left_rooms: - left_users = await self.store.get_users_in_room(room_id) - newly_left_users.update(left_users) - - # Remove any users that we still share a room with. 
- left_users_rooms = await self.store.get_rooms_for_users(newly_left_users) - for user_id, entries in left_users_rooms.items(): - if any(rid in joined_room_ids for rid in entries): - newly_left_users.discard(user_id) - - return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users) - @trace async def _generate_sync_entry_for_to_device( self, sync_result_builder: "SyncResultBuilder" @@ -2241,18 +2277,18 @@ class SyncHandler: if push_rules_changed: global_account_data = dict(global_account_data) - global_account_data[AccountDataTypes.PUSH_RULES] = ( - await self._push_rules_handler.push_rules_for_user(sync_config.user) - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) else: all_global_account_data = await self.store.get_global_account_data_for_user( user_id ) global_account_data = dict(all_global_account_data) - global_account_data[AccountDataTypes.PUSH_RULES] = ( - await self._push_rules_handler.push_rules_for_user(sync_config.user) - ) + global_account_data[ + AccountDataTypes.PUSH_RULES + ] = await self._push_rules_handler.push_rules_for_user(sync_config.user) account_data_for_user = ( await sync_config.filter_collection.filter_global_account_data( @@ -2514,6 +2550,7 @@ class SyncHandler: room_entries: List[RoomSyncResultBuilder] = [] invited: List[InvitedSyncResult] = [] knocked: List[KnockedSyncResult] = [] + invite_config = await self.store.get_invite_config_for_user(user_id) for room_id, events in mem_change_events_by_room_id.items(): # The body of this loop will add this room to at least one of the five lists # above. Things get messy if you've e.g. joined, left, joined then left the @@ -2596,7 +2633,11 @@ class SyncHandler: # Only bother if we're still currently invited should_invite = last_non_join.membership == Membership.INVITE if should_invite: - if last_non_join.sender not in ignored_users: + if ( + last_non_join.sender not in ignored_users + and invite_config.get_invite_rule(last_non_join.sender) + != InviteRule.IGNORE + ): invite_room_sync = InvitedSyncResult(room_id, invite=last_non_join) if invite_room_sync: invited.append(invite_room_sync) @@ -2683,7 +2724,7 @@ class SyncHandler: newly_joined = room_id in newly_joined_rooms if room_entry: - events, start_key = room_entry + events, start_key, _ = room_entry # We want to return the events in ascending order (the last event is the # most recent). events.reverse() @@ -2751,6 +2792,7 @@ class SyncHandler: membership_list=Membership.LIST, excluded_rooms=sync_result_builder.excluded_room_ids, ) + invite_config = await self.store.get_invite_config_for_user(user_id) room_entries = [] invited = [] @@ -2776,6 +2818,8 @@ class SyncHandler: elif event.membership == Membership.INVITE: if event.sender in ignored_users: continue + if invite_config.get_invite_rule(event.sender) == InviteRule.IGNORE: + continue invite = await self.store.get_event(event.event_id) invited.append(InvitedSyncResult(room_id=event.room_id, invite=invite)) elif event.membership == Membership.KNOCK: @@ -2947,6 +2991,7 @@ class SyncHandler: since_token, room_builder.end_token, full_state=full_state, + joined=room_builder.rtype == "joined", ) else: # An out of band room won't have any state changes. diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py
index 32dca8c43b..477961d78c 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py
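The two threepid checkers removed below shared a common base; everything that remains implements the same small interface. A hedged sketch of that shape, modelled on the `RegistrationTokenAuthChecker` class visible in the hunks (the auth type and config lookup are illustrative, not real values):

    from typing import TYPE_CHECKING, Any

    from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker

    if TYPE_CHECKING:
        from synapse.server import HomeServer


    class ExampleAuthChecker(UserInteractiveAuthChecker):
        AUTH_TYPE = "com.example.demo_auth"  # illustrative, not a real login type

        def __init__(self, hs: "HomeServer"):
            super().__init__(hs)
            self._enabled = True  # a real checker derives this from hs.config

        def is_enabled(self) -> bool:
            return self._enabled

        async def check_auth(self, authdict: dict, clientip: str) -> Any:
            # Validate `authdict` here; raise LoginError on failure, or
            # return data to associate with the completed stage on success.
            return True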
@@ -157,104 +157,6 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker): ) -class _BaseThreepidAuthChecker: - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.store = hs.get_datastores().main - - async def _check_threepid(self, medium: str, authdict: dict) -> dict: - if "threepid_creds" not in authdict: - raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) - - threepid_creds = authdict["threepid_creds"] - - identity_handler = self.hs.get_identity_handler() - - logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,)) - - # msisdns are currently always verified via the IS - if medium == "msisdn": - if not self.hs.config.registration.account_threepid_delegate_msisdn: - raise SynapseError( - 400, "Phone number verification is not enabled on this homeserver" - ) - threepid = await identity_handler.threepid_from_creds( - self.hs.config.registration.account_threepid_delegate_msisdn, - threepid_creds, - ) - elif medium == "email": - if self.hs.config.email.can_verify_email: - threepid = None - row = await self.store.get_threepid_validation_session( - medium, - threepid_creds["client_secret"], - sid=threepid_creds["sid"], - validated=True, - ) - - if row: - threepid = { - "medium": row.medium, - "address": row.address, - "validated_at": row.validated_at, - } - - # Valid threepid returned, delete from the db - await self.store.delete_threepid_session(threepid_creds["sid"]) - else: - raise SynapseError( - 400, "Email address verification is not enabled on this homeserver" - ) - else: - # this can't happen! - raise AssertionError("Unrecognized threepid medium: %s" % (medium,)) - - if not threepid: - raise LoginError( - 401, "Unable to get validated threepid", errcode=Codes.UNAUTHORIZED - ) - - if threepid["medium"] != medium: - raise LoginError( - 401, - "Expecting threepid of type '%s', got '%s'" - % (medium, threepid["medium"]), - errcode=Codes.UNAUTHORIZED, - ) - - threepid["threepid_creds"] = authdict["threepid_creds"] - - return threepid - - -class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): - AUTH_TYPE = LoginType.EMAIL_IDENTITY - - def __init__(self, hs: "HomeServer"): - UserInteractiveAuthChecker.__init__(self, hs) - _BaseThreepidAuthChecker.__init__(self, hs) - - def is_enabled(self) -> bool: - return self.hs.config.email.can_verify_email - - async def check_auth(self, authdict: dict, clientip: str) -> Any: - return await self._check_threepid("email", authdict) - - -class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): - AUTH_TYPE = LoginType.MSISDN - - def __init__(self, hs: "HomeServer"): - UserInteractiveAuthChecker.__init__(self, hs) - _BaseThreepidAuthChecker.__init__(self, hs) - - def is_enabled(self) -> bool: - return bool(self.hs.config.registration.account_threepid_delegate_msisdn) - - async def check_auth(self, authdict: dict, clientip: str) -> Any: - return await self._check_threepid("msisdn", authdict) - - class RegistrationTokenAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.REGISTRATION_TOKEN @@ -263,7 +165,7 @@ class RegistrationTokenAuthChecker(UserInteractiveAuthChecker): self.hs = hs self._enabled = bool( hs.config.registration.registration_requires_token - ) or bool(hs.config.registration.enable_registration_token_3pid_bypass) + ) self.store = hs.get_datastores().main def is_enabled(self) -> bool: @@ -325,8 +227,6 @@ INTERACTIVE_AUTH_CHECKERS: Sequence[Type[UserInteractiveAuthChecker]] = [ DummyAuthChecker, TermsAuthChecker, 
RecaptchaAuthChecker, - EmailIdentityAuthChecker, - MsisdnAuthChecker, RegistrationTokenAuthChecker, ] """A list of UserInteractiveAuthChecker classes""" diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index a343637b82..33edef5f14 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py
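One change below passes the searching user's ID into `check_username_for_spam`, so spam-checker modules can make per-requester decisions. A hedged sketch of a module callback matching that two-argument call site (the exact callback signature and the policy shown are assumptions for illustration):

    async def check_username_for_spam(user_profile: dict, requester_id: str) -> bool:
        # Returning True hides this profile from the requester's search results.
        # Illustrative policy: only show profiles from the requester's own domain.
        searcher_domain = requester_id.split(":", 1)[1]
        profile_domain = user_profile["user_id"].split(":", 1)[1]
        return searcher_domain != profile_domain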
@@ -26,7 +26,13 @@ from typing import TYPE_CHECKING, List, Optional, Set, Tuple from twisted.internet.interfaces import IDelayedCall import synapse.metrics -from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership +from synapse.api.constants import ( + EventTypes, + HistoryVisibility, + JoinRules, + Membership, + ProfileFields, +) from synapse.api.errors import Codes, SynapseError from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics.background_process_metrics import run_as_background_process @@ -102,6 +108,9 @@ class UserDirectoryHandler(StateDeltasHandler): self.is_mine_id = hs.is_mine_id self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users + self.exclude_remote_users = ( + hs.config.userdirectory.user_directory_exclude_remote_users + ) self.show_locked_users = hs.config.userdirectory.show_locked_users self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._hs = hs @@ -161,7 +170,7 @@ class UserDirectoryHandler(StateDeltasHandler): non_spammy_users = [] for user in results["results"]: if not await self._spam_checker_module_callbacks.check_username_for_spam( - user + user, user_id ): non_spammy_users.append(user) results["results"] = non_spammy_users @@ -756,6 +765,10 @@ class UserDirectoryHandler(StateDeltasHandler): await self.store.update_profile_in_user_dir( user_id, - display_name=non_null_str_or_none(profile.get("displayname")), - avatar_url=non_null_str_or_none(profile.get("avatar_url")), + display_name=non_null_str_or_none( + profile.get(ProfileFields.DISPLAYNAME) + ), + avatar_url=non_null_str_or_none( + profile.get(ProfileFields.AVATAR_URL) + ), ) diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py
index 7e578cf462..e58a416026 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py
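The hunks below add a warning when the lock retry interval grows past roughly ten minutes. A standalone sketch of that check; note the exponent must be written with `**`, since in Python `^` is bitwise XOR (`5 * 2 ^ 7` evaluates to 13, while `5 * 2**7` is the intended 640 seconds):

    import logging
    import random

    logger = logging.getLogger(__name__)

    def next_retry_interval(current: float) -> float:
        # Double the interval, floored at 5 seconds.
        interval = max(5.0, current * 2)
        if interval > 5 * 2**7:  # 640s, ~10 minutes
            logger.warning("Lock timeout is getting excessive: %ds", interval)
        # Jitter to avoid synchronised retries across waiters.
        return interval * random.uniform(0.9, 1.1)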
@@ -19,6 +19,7 @@
 #
 #
 
+import logging
 import random
 from types import TracebackType
 from typing import (
@@ -183,7 +184,7 @@ class WorkerLocksHandler:
             return
 
         def _wake_all_locks(
-            locks: Collection[Union[WaitingLock, WaitingMultiLock]]
+            locks: Collection[Union[WaitingLock, WaitingMultiLock]],
         ) -> None:
             for lock in locks:
                 deferred = lock.deferred
@@ -269,6 +270,10 @@ class WaitingLock:
     def _get_next_retry_interval(self) -> float:
         next = self._retry_interval
         self._retry_interval = max(5, next * 2)
+        if self._retry_interval > 5 * 2**7:  # ~10 minutes
+            logging.warning(
+                f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock."
+            )
         return next * random.uniform(0.9, 1.1)
 
@@ -344,4 +349,8 @@ class WaitingMultiLock:
     def _get_next_retry_interval(self) -> float:
         next = self._retry_interval
         self._retry_interval = max(5, next * 2)
+        if self._retry_interval > 5 * 2**7:  # ~10 minutes
+            logging.warning(
+                f"Lock timeout is getting excessive: {self._retry_interval}s. There may be a deadlock."
+            )
         return next * random.uniform(0.9, 1.1)
diff --git a/synapse/http/client.py b/synapse/http/client.py
index 56ad28eabf..84a510fb42 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py
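Among the changes below, the multipart header callbacks now lowercase header names before comparing. HTTP field names are case-insensitive (RFC 9110), so a remote server may legitimately send `location` or `LOCATION`. A tiny illustration of the comparison the callbacks use:

    def is_location_header(name: bytes) -> bool:
        # Compare case-insensitively, as the parser callbacks below do.
        return name.lower() == b"location"

    assert is_location_header(b"Location")
    assert is_location_header(b"LOCATION")
    assert not is_location_header(b"Content-Type")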
@@ -31,18 +31,17 @@ from typing import ( List, Mapping, Optional, + Protocol, Tuple, Union, ) import attr -import multipart import treq from canonicaljson import encode_canonical_json from netaddr import AddrFormatError, IPAddress, IPSet from prometheus_client import Counter -from typing_extensions import Protocol -from zope.interface import implementer, provider +from zope.interface import implementer from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE @@ -93,6 +92,20 @@ from synapse.util.async_helpers import timeout_deferred if TYPE_CHECKING: from synapse.server import HomeServer +# Support both import names for the `python-multipart` (PyPI) library, +# which renamed its package name from `multipart` to `python_multipart` +# in 0.0.13 (though supports the old import name for compatibility). +# Note that the `multipart` package name conflicts with `multipart` (PyPI) +# so we should prefer importing from `python_multipart` when possible. +try: + from python_multipart import MultipartParser + + if TYPE_CHECKING: + from python_multipart import multipart +except ImportError: + from multipart import MultipartParser # type: ignore[no-redef] + + logger = logging.getLogger(__name__) outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) @@ -212,7 +225,7 @@ class _IPBlockingResolver: recv.addressResolved(address) recv.resolutionComplete() - @provider(IResolutionReceiver) + @implementer(IResolutionReceiver) class EndpointReceiver: @staticmethod def resolutionBegan(resolutionInProgress: IHostResolution) -> None: @@ -226,8 +239,9 @@ class _IPBlockingResolver: def resolutionComplete() -> None: _callback() + endpoint_receiver_wrapper = EndpointReceiver() self._reactor.nameResolver.resolveHostName( - EndpointReceiver, hostname, portNumber=portNumber + endpoint_receiver_wrapper, hostname, portNumber=portNumber ) return recv @@ -1039,7 +1053,7 @@ class _MultipartParserProtocol(protocol.Protocol): self.deferred = deferred self.boundary = boundary self.max_length = max_length - self.parser = None + self.parser: Optional[MultipartParser] = None self.multipart_response = MultipartResponse() self.has_redirect = False self.in_json = False @@ -1057,11 +1071,11 @@ class _MultipartParserProtocol(protocol.Protocol): if not self.parser: def on_header_field(data: bytes, start: int, end: int) -> None: - if data[start:end] == b"Location": + if data[start:end].lower() == b"location": self.has_redirect = True - if data[start:end] == b"Content-Disposition": + if data[start:end].lower() == b"content-disposition": self.in_disposition = True - if data[start:end] == b"Content-Type": + if data[start:end].lower() == b"content-type": self.in_content_type = True def on_header_value(data: bytes, start: int, end: int) -> None: @@ -1088,7 +1102,6 @@ class _MultipartParserProtocol(protocol.Protocol): return # otherwise we are in the file part else: - logger.info("Writing multipart file data to stream") try: self.stream.write(data[start:end]) except Exception as e: @@ -1098,12 +1111,12 @@ class _MultipartParserProtocol(protocol.Protocol): self.deferred.errback() self.file_length += end - start - callbacks = { + callbacks: "multipart.MultipartCallbacks" = { "on_header_field": on_header_field, "on_header_value": on_header_value, "on_part_data": on_part_data, } - self.parser = multipart.MultipartParser(self.boundary, callbacks) + self.parser = MultipartParser(self.boundary, callbacks) self.total_length += len(incoming_data) if self.max_length is not None and self.total_length >= self.max_length: @@ 
-1114,7 +1127,7 @@ class _MultipartParserProtocol(protocol.Protocol): self.transport.abortConnection() try: - self.parser.write(incoming_data) # type: ignore[attr-defined] + self.parser.write(incoming_data) except Exception as e: logger.warning(f"Exception writing to multipart parser: {e}") self.deferred.errback() @@ -1314,6 +1327,5 @@ def is_unknown_endpoint( ) ) or ( # Older Synapses returned a 400 error. - e.code == 400 - and synapse_error.errcode == Codes.UNRECOGNIZED + e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED ) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 6fd75fd381..88bf98045c 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py
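The last hunk below replaces `cgi.parse_header` (the `cgi` module was removed from the standard library in Python 3.13) with a manual extraction of the Content-Type "essence". The equivalent logic in isolation:

    def content_type_essence(c_type: str) -> str:
        # Drop any parameters such as `; charset=utf-8`, plus surrounding space.
        return c_type.split(";", 1)[0].strip()

    assert content_type_essence("application/json; charset=utf-8") == "application/json"
    assert content_type_essence("application/json") == "application/json"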
@@ -19,7 +19,6 @@ # # import abc -import cgi import codecs import logging import random @@ -35,6 +34,7 @@ from typing import ( Dict, Generic, List, + Literal, Optional, TextIO, Tuple, @@ -49,7 +49,6 @@ import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json -from typing_extensions import Literal from twisted.internet import defer from twisted.internet.error import DNSLookupError @@ -426,9 +425,9 @@ class MatrixFederationHttpClient: ) else: proxy_authorization_secret = hs.config.worker.worker_replication_secret - assert ( - proxy_authorization_secret is not None - ), "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)" + assert proxy_authorization_secret is not None, ( + "`worker_replication_secret` must be set when using `outbound_federation_restricted_to` (used to authenticate requests across workers)" + ) federation_proxy_credentials = BearerProxyCredentials( proxy_authorization_secret.encode("ascii") ) @@ -792,7 +791,7 @@ class MatrixFederationHttpClient: url_str, _flatten_response_never_received(e), ) - body = None + body = b"" exc = HttpResponseException( response.code, response_phrase, body @@ -1756,8 +1755,10 @@ class MatrixFederationHttpClient: request.destination, str_url, ) + # We don't know how large the response will be upfront, so limit it to + # the `max_size` config value. length, headers, _, _ = await self._simple_http_client.get_file( - str_url, output_stream, expected_size + str_url, output_stream, max_size ) logger.info( @@ -1811,8 +1812,9 @@ def check_content_type_is(headers: Headers, expected_content_type: str) -> None: ) c_type = content_type_headers[0].decode("ascii") # only the first header - val, options = cgi.parse_header(c_type) - if val != expected_content_type: + # Extract the 'essence' of the mimetype, removing any parameter + c_type_parsed = c_type.split(";", 1)[0].strip() + if c_type_parsed != expected_content_type: raise RequestSendFailed( RuntimeError( f"Remote server sent Content-Type header of '{c_type}', not '{expected_content_type}'", diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py
index 97aa429e7d..5cd990b0d0 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py
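The rewrite below keeps both the static hop-by-hop set and the per-response `Connection` header set in lowercase, so header filtering becomes a simple case-insensitive lookup. A minimal sketch of the resulting filter (abridged header set):

    from typing import Set

    HOP_BY_HOP = {"connection", "keep-alive", "transfer-encoding", "upgrade"}

    def should_forward(header_name: str, extra_to_remove: Set[str]) -> bool:
        name = header_name.lower()
        return name not in HOP_BY_HOP and name not in extra_to_remove

    assert not should_forward("Transfer-Encoding", set())
    assert should_forward("Content-Type", set())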
@@ -51,25 +51,17 @@ logger = logging.getLogger(__name__) # "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616 # section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be # consumed by the immediate recipient and not be forwarded on. -HOP_BY_HOP_HEADERS = { - "Connection", - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "TE", - "Trailers", - "Transfer-Encoding", - "Upgrade", +HOP_BY_HOP_HEADERS_LOWERCASE = { + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailers", + "transfer-encoding", + "upgrade", } - -if hasattr(Headers, "_canonicalNameCaps"): - # Twisted < 24.7.0rc1 - _canonicalHeaderName = Headers()._canonicalNameCaps # type: ignore[attr-defined] -else: - # Twisted >= 24.7.0rc1 - # But note that `_encodeName` still exists on prior versions, - # it just encodes differently - _canonicalHeaderName = Headers()._encodeName +assert all(header.lower() == header for header in HOP_BY_HOP_HEADERS_LOWERCASE) def parse_connection_header_value( @@ -92,12 +84,12 @@ def parse_connection_header_value( Returns: The set of header names that should not be copied over from the remote response. - The keys are capitalized in canonical capitalization. + The keys are lowercased. """ extra_headers_to_remove: Set[str] = set() if connection_header_value: extra_headers_to_remove = { - _canonicalHeaderName(connection_option.strip()).decode("ascii") + connection_option.decode("ascii").strip().lower() for connection_option in connection_header_value.split(b",") } @@ -194,7 +186,7 @@ class ProxyResource(_AsyncResource): # The `Connection` header also defines which headers should not be copied over. connection_header = response_headers.getRawHeaders(b"connection") - extra_headers_to_remove = parse_connection_header_value( + extra_headers_to_remove_lowercase = parse_connection_header_value( connection_header[0] if connection_header else None ) @@ -202,10 +194,10 @@ class ProxyResource(_AsyncResource): for k, v in response_headers.getAllRawHeaders(): # Do not copy over any hop-by-hop headers. These are meant to only be # consumed by the immediate recipient and not be forwarded on. - header_key = k.decode("ascii") + header_key_lowercase = k.decode("ascii").lower() if ( - header_key in HOP_BY_HOP_HEADERS - or header_key in extra_headers_to_remove + header_key_lowercase in HOP_BY_HOP_HEADERS_LOWERCASE + or header_key_lowercase in extra_headers_to_remove_lowercase ): continue diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py
index f80f67acc6..6817199035 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py
@@ -21,7 +21,7 @@ import logging import random import re -from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple +from typing import Any, Collection, Dict, List, Optional, Sequence, Tuple, Union from urllib.parse import urlparse from urllib.request import ( # type: ignore[attr-defined] getproxies_environment, @@ -150,6 +150,12 @@ class ProxyAgent(_AgentBase): http_proxy = proxies["http"].encode() if "http" in proxies else None https_proxy = proxies["https"].encode() if "https" in proxies else None no_proxy = proxies["no"] if "no" in proxies else None + logger.debug( + "Using proxy settings: http_proxy=%s, https_proxy=%s, no_proxy=%s", + http_proxy, + https_proxy, + no_proxy, + ) self.http_proxy_endpoint, self.http_proxy_creds = http_proxy_endpoint( http_proxy, self.proxy_reactor, contextFactory, **self._endpoint_kwargs @@ -167,9 +173,9 @@ class ProxyAgent(_AgentBase): self._federation_proxy_endpoint: Optional[IStreamClientEndpoint] = None self._federation_proxy_credentials: Optional[ProxyCredentials] = None if federation_proxy_locations: - assert ( - federation_proxy_credentials is not None - ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`" + assert federation_proxy_credentials is not None, ( + "`federation_proxy_credentials` are required when using `federation_proxy_locations`" + ) endpoints: List[IStreamClientEndpoint] = [] for federation_proxy_location in federation_proxy_locations: @@ -296,9 +302,9 @@ class ProxyAgent(_AgentBase): parsed_uri.scheme == b"matrix-federation" and self._federation_proxy_endpoint ): - assert ( - self._federation_proxy_credentials is not None - ), "`federation_proxy_credentials` are required when using `federation_proxy_locations`" + assert self._federation_proxy_credentials is not None, ( + "`federation_proxy_credentials` are required when using `federation_proxy_locations`" + ) # Set a Proxy-Authorization header if headers is None: @@ -351,7 +357,9 @@ def http_proxy_endpoint( proxy: Optional[bytes], reactor: IReactorCore, tls_options_factory: Optional[IPolicyForHTTPS], - **kwargs: object, + timeout: float = 30, + bindAddress: Optional[Union[bytes, str, tuple[Union[bytes, str], int]]] = None, + attemptDelay: Optional[float] = None, ) -> Tuple[Optional[IStreamClientEndpoint], Optional[ProxyCredentials]]: """Parses an http proxy setting and returns an endpoint for the proxy @@ -382,12 +390,15 @@ def http_proxy_endpoint( # 3.9+) on scheme-less proxies, e.g. host:port. scheme, host, port, credentials = parse_proxy(proxy) - proxy_endpoint = HostnameEndpoint(reactor, host, port, **kwargs) + proxy_endpoint = HostnameEndpoint( + reactor, host, port, timeout, bindAddress, attemptDelay + ) if scheme == b"https": if tls_options_factory: tls_options = tls_options_factory.creatorForNetloc(host, port) - proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint) + wrapped_proxy_endpoint = wrapClientTLS(tls_options, proxy_endpoint) + return wrapped_proxy_endpoint, credentials else: raise RuntimeError( f"No TLS options for a https connection via proxy {proxy!s}" diff --git a/synapse/http/replicationagent.py b/synapse/http/replicationagent.py
index ee8c707062..4eabbc8af9 100644 --- a/synapse/http/replicationagent.py +++ b/synapse/http/replicationagent.py
@@ -89,7 +89,7 @@ class ReplicationEndpointFactory: location_config.port, ) if scheme == "https": - endpoint = wrapClientTLS( + wrapped_endpoint = wrapClientTLS( # The 'port' argument below isn't actually used by the function self.context_factory.creatorForNetloc( location_config.host.encode("utf-8"), @@ -97,6 +97,8 @@ class ReplicationEndpointFactory: ), endpoint, ) + return wrapped_endpoint + return endpoint elif isinstance(location_config, InstanceUnixLocationConfig): return UNIXClientEndpoint(self.reactor, location_config.path) diff --git a/synapse/http/server.py b/synapse/http/server.py
index 0d0c610b28..bdd90d8a73 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py
@@ -39,6 +39,7 @@ from typing import (
     List,
     Optional,
     Pattern,
+    Protocol,
     Tuple,
     Union,
 )
@@ -46,7 +47,6 @@ from typing import (
 import attr
 import jinja2
 from canonicaljson import encode_canonical_json
-from typing_extensions import Protocol
 from zope.interface import implementer
 
 from twisted.internet import defer, interfaces
@@ -74,7 +74,6 @@ from synapse.api.errors import (
 from synapse.config.homeserver import HomeServerConfig
 from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background
 from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
-from synapse.types import ISynapseReactor
 from synapse.util import json_encoder
 from synapse.util.caches import intern_dict
 from synapse.util.cancellation import is_function_cancellable
@@ -142,7 +141,7 @@ def return_json_error(
         )
     else:
         error_code = 500
         error_dict = {"error": "Internal server error", "errcode": Codes.UNKNOWN}
 
     logger.error(
         "Failed handle request via %r: %r",
@@ -234,7 +233,7 @@ def return_html_error(
 
 def wrap_async_request_handler(
-    h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]]
+    h: Callable[["_AsyncResource", "SynapseRequest"], Awaitable[None]],
 ) -> Callable[["_AsyncResource", "SynapseRequest"], "defer.Deferred[None]"]:
     """Wraps an async request handler so that it calls request.processing.
 
@@ -869,8 +868,7 @@ async def _async_write_json_to_request_in_thread(
     with start_active_span("encode_json_response"):
         span = active_span()
-        reactor: ISynapseReactor = request.reactor  # type: ignore
-        json_str = await defer_to_thread(reactor, encode, span)
+        json_str = await defer_to_thread(request.reactor, encode, span)
 
     _write_bytes_to_request(request, json_str)
 
@@ -923,15 +921,6 @@ def set_cors_headers(request: "SynapseRequest") -> None:
             b"Access-Control-Expose-Headers",
             b"Synapse-Trace-Id, Server, ETag",
         )
-    elif request.experimental_cors_msc3886:
-        request.setHeader(
-            b"Access-Control-Allow-Headers",
-            b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match",
-        )
-        request.setHeader(
-            b"Access-Control-Expose-Headers",
-            b"ETag, Location, X-Max-Bytes",
-        )
     else:
         request.setHeader(
             b"Access-Control-Allow-Headers",
diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index 08b8ff7afd..47d8bd5eaf 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py
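The reformatted assertion below encodes the requirement that `parse_enum` only works with string-valued enums, since query parameters arrive as strings and are matched by value. For instance, with an enum shaped like `Direction` from `synapse.api.constants`:

    from enum import Enum

    class Direction(Enum):  # mirrors synapse.api.constants.Direction
        FORWARDS = "f"
        BACKWARDS = "b"

    # The precondition parse_enum asserts:
    assert all(isinstance(e.value, str) for e in Direction)
    # A query string like `?dir=b` is then matched by value:
    assert Direction("b") is Direction.BACKWARDS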
@@ -28,6 +28,7 @@ from http import HTTPStatus from typing import ( TYPE_CHECKING, List, + Literal, Mapping, Optional, Sequence, @@ -37,19 +38,15 @@ from typing import ( overload, ) -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, MissingError, PydanticValueError, ValidationError - from pydantic.v1.error_wrappers import ErrorWrapper -else: - from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError - from pydantic.error_wrappers import ErrorWrapper - -from typing_extensions import Literal - from twisted.web.server import Request +from synapse._pydantic_compat import ( + BaseModel, + ErrorWrapper, + MissingError, + PydanticValueError, + ValidationError, +) from synapse.api.errors import Codes, SynapseError from synapse.http import redact_uri from synapse.http.server import HttpServer @@ -585,9 +582,9 @@ def parse_enum( is not one of those allowed values. """ # Assert the enum values are strings. - assert all( - isinstance(e.value, str) for e in E - ), "parse_enum only works with string values" + assert all(isinstance(e.value, str) for e in E), ( + "parse_enum only works with string values" + ) str_value = parse_string( request, name, diff --git a/synapse/http/site.py b/synapse/http/site.py
index af169ba51e..e83a4447b2 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py
@@ -21,6 +21,7 @@
 import contextlib
 import logging
 import time
+from http import HTTPStatus
 from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union
 
 import attr
@@ -94,7 +95,6 @@ class SynapseRequest(Request):
         self.reactor = site.reactor
         self._channel = channel  # this is used by the tests
         self.start_time = 0.0
-        self.experimental_cors_msc3886 = site.experimental_cors_msc3886
 
         # The requester, if authenticated. For federation requests this is the
         # server name, for client requests this is the Requester object.
@@ -140,6 +140,41 @@ class SynapseRequest(Request):
             self.synapse_site.site_tag,
         )
 
+    # Twisted machinery: this method is called by the Channel once the full request
+    # has been received, to dispatch the request to a resource.
+    #
+    # We're patching Twisted to bail out early when we see someone trying to upload
+    # `multipart/form-data`, so we can avoid Twisted parsing the entire request body
+    # into memory (a problem specific to this `Content-Type`). This protects us
+    # from an attacker uploading something bigger than the available RAM and
+    # crashing the server with a `MemoryError`, or carefully blocking just enough
+    # resources to cause all other requests to fail.
+    #
+    # FIXME: This can be removed once Twisted releases a fix and we update to a
+    # patched version.
+    def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None:
+        if command == b"POST":
+            ctype = self.requestHeaders.getRawHeaders(b"content-type")
+            if ctype and b"multipart/form-data" in ctype[0]:
+                self.method, self.uri = command, path
+                self.clientproto = version
+                self.code = HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value
+                self.code_message = bytes(
+                    HTTPStatus.UNSUPPORTED_MEDIA_TYPE.phrase, "ascii"
+                )
+                self.responseHeaders.setRawHeaders(b"content-length", [b"0"])
+
+                logger.warning(
+                    "Aborting connection from %s because `content-type: multipart/form-data` is unsupported: %s %s",
+                    self.client,
+                    command,
+                    path,
+                )
+                self.write(b"")
+                self.loseConnection()
+                return
+        return super().requestReceived(command, path, version)
+
     def handleContentChunk(self, data: bytes) -> None:
         # we should have a `content` by now.
         assert self.content, "handleContentChunk() called before gotLength()"
@@ -658,7 +693,7 @@ class SynapseSite(ProxySite):
         )
 
         self.site_tag = site_tag
-        self.reactor = reactor
+        self.reactor: ISynapseReactor = reactor
 
         assert config.http_options is not None
         proxied = config.http_options.x_forwarded
@@ -666,10 +701,6 @@ class SynapseSite(ProxySite):
 
         request_id_header = config.http_options.request_id_header
 
-        self.experimental_cors_msc3886: bool = (
-            config.http_options.experimental_cors_msc3886
-        )
-
         def request_factory(channel: HTTPChannel, queued: bool) -> Request:
             return request_class(
                 channel,
diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py
index f047edee8e..ac34fa6525 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py
@@ -39,7 +39,7 @@ from twisted.internet.endpoints import ( ) from twisted.internet.interfaces import ( IPushProducer, - IReactorTCP, + IReactorTime, IStreamClientEndpoint, ) from twisted.internet.protocol import Factory, Protocol @@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler): port: int, maximum_buffer: int = 1000, level: int = logging.NOTSET, - _reactor: Optional[IReactorTCP] = None, + _reactor: Optional[IReactorTime] = None, ): super().__init__(level=level) self.host = host diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 6a6afbfc0b..d9ff70b252 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py
@@ -22,6 +22,7 @@ """ Log formatters that output terse JSON. """ + import json import logging diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 4650b60962..3ef97f23c9 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py
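Among the changes below is the new `run_coroutine_in_background`, which accepts a coroutine object directly. This is safe because a coroutine does not run until awaited, so merely creating it cannot disturb the log context. A hedged usage sketch (`fetch_one`/`fetch_all` are illustrative names):

    from twisted.internet import defer

    from synapse.logging.context import run_coroutine_in_background

    async def fetch_one(n: int) -> int:
        return n * 2

    def fetch_all() -> "defer.Deferred":
        # Fire off the coroutines without awaiting them; each is wrapped so
        # the current log context is restored correctly around it.
        deferreds = [run_coroutine_in_background(fetch_one(n)) for n in range(3)]
        return defer.gatherResults(deferreds)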
@@ -20,7 +20,7 @@ # # -""" Thread-local-alike tracking of log contexts within synapse +"""Thread-local-alike tracking of log contexts within synapse This module provides objects and utilities for tracking contexts through synapse code, so that log lines can include a request identifier, and so that @@ -29,6 +29,7 @@ them. See doc/log_contexts.rst for details on how this works. """ + import logging import threading import typing @@ -36,8 +37,10 @@ import warnings from types import TracebackType from typing import ( TYPE_CHECKING, + Any, Awaitable, Callable, + Literal, Optional, Tuple, Type, @@ -47,7 +50,7 @@ from typing import ( ) import attr -from typing_extensions import Literal, ParamSpec +from typing_extensions import ParamSpec from twisted.internet import defer, threads from twisted.python.threadpool import ThreadPool @@ -751,7 +754,7 @@ def preserve_fn( f: Union[ Callable[P, R], Callable[P, Awaitable[R]], - ] + ], ) -> Callable[P, "defer.Deferred[R]"]: """Function decorator which wraps the function with run_in_background""" @@ -849,6 +852,45 @@ def run_in_background( return d +def run_coroutine_in_background( + coroutine: typing.Coroutine[Any, Any, R], +) -> "defer.Deferred[R]": + """Run the coroutine, ensuring that the current context is restored after + return from the function, and that the sentinel context is set once the + deferred returned by the function completes. + + Useful for wrapping coroutines that you don't yield or await on (for + instance because you want to pass it to deferred.gatherResults()). + + This is a special case of `run_in_background` where we can accept a + coroutine directly rather than a function. We can do this because coroutines + do not run until called, and so calling an async function without awaiting + cannot change the log contexts. + """ + + current = current_context() + d = defer.ensureDeferred(coroutine) + + # The function may have reset the context before returning, so + # we need to restore it now. + ctx = set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + d.addBoth(_set_context_cb, ctx) + return d + + T = TypeVar("T") diff --git a/synapse/logging/filter.py b/synapse/logging/filter.py
index 11c27c63f2..16de488dbc 100644 --- a/synapse/logging/filter.py +++ b/synapse/logging/filter.py
@@ -19,8 +19,7 @@ # # import logging - -from typing_extensions import Literal +from typing import Literal class MetadataFilter(logging.Filter): diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 7a3c805cc5..d976e58e49 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py
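One hunk below changes `tag_args` to skip `self`/`cls` by name rather than by position, so plain functions get their first argument tagged too. The matching logic, distilled into a runnable sketch:

    import inspect

    def tags_for_call(func, *args) -> dict:
        argspec = inspect.getfullargspec(func)
        tags = {}
        for i, arg in enumerate(args):
            if argspec.args[i] in ("self", "cls"):
                continue  # skip the instance/class reference by name
            tags[argspec.args[i]] = str(arg)
        return tags

    class Store:
        def fetch(self, room_id, limit):
            pass

    assert tags_for_call(Store.fetch, Store(), "!r:x", 10) == {"room_id": "!r:x", "limit": "10"}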
@@ -169,6 +169,7 @@ Gotchas than one caller? Will all of those calling functions have be in a context with an active span? """ + import contextlib import enum import inspect @@ -414,7 +415,7 @@ def ensure_active_span( """ def ensure_active_span_inner_1( - func: Callable[P, R] + func: Callable[P, R], ) -> Callable[P, Union[Optional[T], R]]: @wraps(func) def ensure_active_span_inner_2( @@ -700,7 +701,7 @@ def set_operation_name(operation_name: str) -> None: @only_if_tracing def force_tracing( - span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel + span: Union["opentracing.Span", _Sentinel] = _Sentinel.sentinel, ) -> None: """Force sampling for the active/given span and its children. @@ -1032,13 +1033,13 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: def _wrapping_logic( _func: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> Generator[None, None, None]: - # We use `[1:]` to skip the `self` object reference and `start=1` to - # make the index line up with `argspec.args`. - # - # FIXME: We could update this to handle any type of function by ignoring the - # first argument only if it's named `self` or `cls`. This isn't fool-proof - # but handles the idiomatic cases. - for i, arg in enumerate(args[1:], start=1): + for i, arg in enumerate(args, start=0): + if argspec.args[i] in ("self", "cls"): + # Ignore `self` and `cls` values. Ideally we'd properly detect + # if we were wrapping a method, but that is really non-trivial + # and this is good enough. + continue + set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg)) set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) @@ -1093,9 +1094,10 @@ def trace_servlet( # Mypy seems to think that start_context.tag below can be Optional[str], but # that doesn't appear to be correct and works in practice. - request_tags[ - SynapseTags.REQUEST_TAG - ] = request.request_metrics.start_context.tag # type: ignore[assignment] + + request_tags[SynapseTags.REQUEST_TAG] = ( + request.request_metrics.start_context.tag # type: ignore[assignment] + ) # set the tags *after* the servlet completes, in case it decided to # prioritise the span (tags will get dropped on unprioritised spans) diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index 581e6d6411..feaadc4d87 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py
@@ -20,13 +20,10 @@ # import logging -from types import TracebackType -from typing import Optional, Type +from typing import Optional from opentracing import Scope, ScopeManager, Span -import twisted - from synapse.logging.context import ( LoggingContext, current_context, @@ -112,9 +109,6 @@ class _LogContextScope(Scope): """ A custom opentracing scope, associated with a LogContext - * filters out _DefGen_Return exceptions which arise from calling - `defer.returnValue` in Twisted code - * When the scope is closed, the logcontext's active scope is reset to None. and - if enter_logcontext was set - the logcontext is finished too. """ @@ -146,17 +140,6 @@ class _LogContextScope(Scope): self._finish_on_close = finish_on_close self._enter_logcontext = enter_logcontext - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - value: Optional[BaseException], - traceback: Optional[TracebackType], - ) -> None: - if exc_type == twisted.internet.defer._DefGen_Return: - # filter out defer.returnValue() calls - exc_type = value = traceback = None - super().__exit__(exc_type, value, traceback) - def __str__(self) -> str: return f"Scope<{self.span}>" diff --git a/synapse/media/_base.py b/synapse/media/_base.py
index 1b268ce4d4..2e48d2fdc7 100644 --- a/synapse/media/_base.py +++ b/synapse/media/_base.py
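A theme of the hunks below: media is immutable, so responses carry a constant ETag plus `Cache-Control: public,max-age=86400,s-maxage=0,proxy-revalidate`, forcing CDNs to revalidate (and hence re-authorise) on every hit. The conditional check reduces to a sketch like this (helper name is illustrative):

    from typing import Optional

    _IMMUTABLE_ETAG = "1"

    def should_respond_304(if_none_match: Optional[str], authorised: bool) -> bool:
        # Re-check auth first: unauthorised requesters get a 401/403, never
        # a 304 confirming their cached copy.
        return authorised and if_none_match == _IMMUTABLE_ETAG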
@@ -28,6 +28,7 @@ from types import TracebackType from typing import ( TYPE_CHECKING, Awaitable, + BinaryIO, Dict, Generator, List, @@ -37,21 +38,28 @@ from typing import ( ) import attr +from zope.interface import implementer +from twisted.internet import interfaces +from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender +from twisted.python.failure import Failure from twisted.web.server import Request from synapse.api.errors import Codes, cs_error from synapse.http.server import finish_request, respond_with_json from synapse.http.site import SynapseRequest -from synapse.logging.context import make_deferred_yieldable +from synapse.logging.context import ( + defer_to_threadpool, + make_deferred_yieldable, + run_in_background, +) from synapse.util import Clock +from synapse.util.async_helpers import DeferredEvent from synapse.util.stringutils import is_ascii if TYPE_CHECKING: - from synapse.storage.databases.main.media_repository import LocalMedia - + from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -110,6 +118,9 @@ DEFAULT_MAX_TIMEOUT_MS = 20_000 # Maximum allowed timeout_ms for download and thumbnail requests MAXIMUM_ALLOWED_MAX_TIMEOUT_MS = 60_000 +# The ETag header value to use for immutable media. This can be anything. +_IMMUTABLE_ETAG = "1" + def respond_404(request: SynapseRequest) -> None: assert request.path is not None @@ -122,6 +133,7 @@ def respond_404(request: SynapseRequest) -> None: async def respond_with_file( + hs: "HomeServer", request: SynapseRequest, media_type: str, file_path: str, @@ -138,7 +150,7 @@ async def respond_with_file( add_file_headers(request, media_type, file_size, upload_name) with open(file_path, "rb") as f: - await make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) + await ThreadedFileSender(hs).beginFileTransfer(f, request) finish_request(request) else: @@ -215,12 +227,7 @@ def add_file_headers( request.setHeader(b"Content-Disposition", disposition.encode("ascii")) - # cache for at least a day. - # XXX: we might want to turn this off for data we don't want to - # recommend caching as it's sensitive or private - or at least - # select private. don't bother setting Expires as all our - # clients are smart enough to be happy with Cache-Control - request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400") + _add_cache_headers(request) if file_size is not None: request.setHeader(b"Content-Length", b"%d" % (file_size,)) @@ -231,6 +238,26 @@ def add_file_headers( request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") +def _add_cache_headers(request: Request) -> None: + """Adds the appropriate cache headers to the response""" + + # Cache on the client for at least a day. + # + # We set this to "public,s-maxage=0,proxy-revalidate" to allow CDNs to cache + # the media, so long as they "revalidate" the media on every request. By + # revalidate, we mean send the request to Synapse with a `If-None-Match` + # header, to which Synapse can either respond with a 304 if the user is + # authenticated/authorized, or a 401/403 if they're not. + request.setHeader( + b"Cache-Control", b"public,max-age=86400,s-maxage=0,proxy-revalidate" + ) + + # Set an ETag header to allow requesters to use it in requests to check if + # the cache is still valid. Since media is immutable (though may be + # deleted), we just set this to a constant. 
+ request.setHeader(b"ETag", _IMMUTABLE_ETAG) + + # separators as defined in RFC2616. SP and HT are handled separately. # see _can_encode_filename_as_token. _FILENAME_SEPARATOR_CHARS = { @@ -279,7 +306,9 @@ async def respond_with_multipart_responder( clock: Clock, request: SynapseRequest, responder: "Optional[Responder]", - media_info: "LocalMedia", + media_type: str, + media_length: Optional[int], + upload_name: Optional[str], ) -> None: """ Responds to requests originating from the federation media `/download` endpoint by @@ -303,7 +332,7 @@ async def respond_with_multipart_responder( ) return - if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES: + if media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES: disposition = "inline" else: disposition = "attachment" @@ -311,33 +340,35 @@ async def respond_with_multipart_responder( def _quote(x: str) -> str: return urllib.parse.quote(x.encode("utf-8")) - if media_info.upload_name: - if _can_encode_filename_as_token(media_info.upload_name): + if upload_name: + if _can_encode_filename_as_token(upload_name): disposition = "%s; filename=%s" % ( disposition, - media_info.upload_name, + upload_name, ) else: disposition = "%s; filename*=utf-8''%s" % ( disposition, - _quote(media_info.upload_name), + _quote(upload_name), ) from synapse.media.media_storage import MultipartFileConsumer + _add_cache_headers(request) + # note that currently the json_object is just {}, this will change when linked media # is implemented multipart_consumer = MultipartFileConsumer( clock, request, - media_info.media_type, - {}, + media_type, + {}, # Note: if we change this we need to change the returned ETag. disposition, - media_info.media_length, + media_length, ) logger.debug("Responding to media request with responder %s", responder) - if media_info.media_length is not None: + if media_length is not None: content_length = multipart_consumer.content_length() assert content_length is not None request.setHeader(b"Content-Length", b"%d" % (content_length,)) @@ -408,6 +439,46 @@ async def respond_with_responder( finish_request(request) +def respond_with_304(request: SynapseRequest) -> None: + request.setResponseCode(304) + + # could alternatively use request.notifyFinish() and flip a flag when + # the Deferred fires, but since the flag is RIGHT THERE it seems like + # a waste. + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return None + + _add_cache_headers(request) + + request.finish() + + +def check_for_cached_entry_and_respond(request: SynapseRequest) -> bool: + """Check if the request has a conditional header that allows us to return a + 304 Not Modified response, and if it does, return a 304 response. + + This handles clients and intermediary proxies caching media. + This method assumes that the user has already been + authorised to request the media. + + Returns True if we have responded.""" + + # We've checked the user has access to the media, so we now check if it + # is a "conditional request" and we can just return a `304 Not Modified` + # response. Since media is immutable (though may be deleted), we just + # check this is the expected constant. + etag = request.getHeader("If-None-Match") + if etag == _IMMUTABLE_ETAG: + # Return a `304 Not modified`. + respond_with_304(request) + return True + + return False + + class Responder(ABC): """Represents a response that can be streamed to the requester. 
@@ -601,3 +672,151 @@ def _parseparam(s: bytes) -> Generator[bytes, None, None]:
         f = s[:end]
         yield f.strip()
         s = s[end:]
+
+
+@implementer(interfaces.IPushProducer)
+class ThreadedFileSender:
+    """
+    A producer that sends the contents of a file to a consumer, reading from the
+    file on a thread.
+
+    This works by having a loop in a threadpool repeatedly reading from the
+    file, until the consumer pauses the producer. There is then a loop in the
+    main thread that waits until the consumer resumes the producer and then
+    starts reading in the threadpool again.
+
+    This is done to ensure that we're never waiting in the threadpool, as
+    otherwise it's easy to starve it of threads.
+    """
+
+    # How much data to read in one go.
+    CHUNK_SIZE = 2**14
+
+    # How long we wait for the consumer to be ready again before aborting the
+    # read.
+    TIMEOUT_SECONDS = 90.0
+
+    def __init__(self, hs: "HomeServer") -> None:
+        self.reactor = hs.get_reactor()
+        self.thread_pool = hs.get_media_sender_thread_pool()
+
+        self.file: Optional[BinaryIO] = None
+        self.deferred: "Deferred[None]" = Deferred()
+        self.consumer: Optional[interfaces.IConsumer] = None
+
+        # Signals if the thread should keep reading/sending data. Set means
+        # continue, clear means pause.
+        self.wakeup_event = DeferredEvent(self.reactor)
+
+        # Signals if the thread should terminate, e.g. because the consumer has
+        # gone away.
+        self.stop_writing = False
+
+    def beginFileTransfer(
+        self, file: BinaryIO, consumer: interfaces.IConsumer
+    ) -> "Deferred[None]":
+        """
+        Begin transferring a file
+        """
+        self.file = file
+        self.consumer = consumer
+
+        self.consumer.registerProducer(self, True)
+
+        # We set the wakeup signal as we should start producing immediately.
+        self.wakeup_event.set()
+        run_in_background(self.start_read_loop)
+
+        return make_deferred_yieldable(self.deferred)
+
+    def resumeProducing(self) -> None:
+        """interfaces.IPushProducer"""
+        self.wakeup_event.set()
+
+    def pauseProducing(self) -> None:
+        """interfaces.IPushProducer"""
+        self.wakeup_event.clear()
+
+    def stopProducing(self) -> None:
+        """interfaces.IPushProducer"""
+
+        # Unregister the consumer so we don't try and interact with it again.
+        if self.consumer:
+            self.consumer.unregisterProducer()
+
+        self.consumer = None
+
+        # Terminate the loop.
+        self.stop_writing = True
+        self.wakeup_event.set()
+
+        if not self.deferred.called:
+            self.deferred.errback(Exception("Consumer asked us to stop producing"))
+
+    async def start_read_loop(self) -> None:
+        """This is the loop that drives reading/writing"""
+        try:
+            while not self.stop_writing:
+                # Start the loop in the threadpool to read data.
+                more_data = await defer_to_threadpool(
+                    self.reactor, self.thread_pool, self._on_thread_read_loop
+                )
+                if not more_data:
+                    # Reached EOF, we can just return.
+                    return
+
+                if not self.wakeup_event.is_set():
+                    ret = await self.wakeup_event.wait(self.TIMEOUT_SECONDS)
+                    if not ret:
+                        raise Exception("Timed out waiting to resume")
+        except Exception:
+            self._error(Failure())
+        finally:
+            self._finish()
+
+    def _on_thread_read_loop(self) -> bool:
+        """This is the loop that happens on a thread.
+
+        Returns:
+            Whether there is more data to send.
+        """
+
+        while not self.stop_writing and self.wakeup_event.is_set():
+            # The file should always have been set before we get here.
+ assert self.file is not None + + chunk = self.file.read(self.CHUNK_SIZE) + if not chunk: + return False + + self.reactor.callFromThread(self._write, chunk) + + return True + + def _write(self, chunk: bytes) -> None: + """Called from the thread to write a chunk of data""" + if self.consumer: + self.consumer.write(chunk) + + def _error(self, failure: Failure) -> None: + """Called when there was a fatal error""" + if self.consumer: + self.consumer.unregisterProducer() + self.consumer = None + + if not self.deferred.called: + self.deferred.errback(failure) + + def _finish(self) -> None: + """Called when we have finished writing (either on success or + failure).""" + if self.file: + self.file.close() + self.file = None + + if self.consumer: + self.consumer.unregisterProducer() + self.consumer = None + + if not self.deferred.called: + self.deferred.callback(None) diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py
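Before the media_repository.py changes below, a note on the ThreadedFileSender added above: the key idea is that a pool thread reads only while the consumer is accepting data, so back-pressure is a cleared event rather than a blocked pool thread. A stripped-down sketch of the same two-loop shape using plain threading primitives (illustrative only; the real class awaits a DeferredEvent on the Twisted reactor instead of blocking, and uses a dedicated media-sender threadpool):

    import io
    import threading
    from concurrent.futures import ThreadPoolExecutor
    from typing import BinaryIO, Callable

    CHUNK_SIZE = 2**14

    def read_while_resumed(
        file: BinaryIO, write: Callable[[bytes], None], wakeup: threading.Event
    ) -> bool:
        # Pool-thread loop: keep reading only while the consumer wants data.
        # Returning (instead of waiting) keeps pool threads available.
        while wakeup.is_set():
            chunk = file.read(CHUNK_SIZE)
            if not chunk:
                return False  # EOF
            write(chunk)
        return True  # Paused; more data remains.

    def send_file(
        file: BinaryIO, write: Callable[[bytes], None], wakeup: threading.Event
    ) -> None:
        # "Main" loop: owns all the waiting (the real code awaits here).
        with ThreadPoolExecutor(max_workers=1) as pool:
            while True:
                more = pool.submit(read_while_resumed, file, write, wakeup).result()
                if not more:
                    return
                if not wakeup.wait(timeout=90.0):
                    raise TimeoutError("Timed out waiting for the consumer to resume")

    wakeup = threading.Event()
    wakeup.set()  # Producing starts immediately, as in beginFileTransfer().
    send_file(io.BytesIO(b"x" * 100_000), lambda chunk: None, wakeup)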
index 8bc92305fe..18c5a8ecec 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py
@@ -52,13 +52,18 @@ from synapse.media._base import (
     FileInfo,
     Responder,
     ThumbnailInfo,
+    check_for_cached_entry_and_respond,
     get_filename_from_headers,
     respond_404,
     respond_with_multipart_responder,
     respond_with_responder,
 )
 from synapse.media.filepath import MediaFilePaths
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import (
+    MediaStorage,
+    SHA256TransparentIOReader,
+    SHA256TransparentIOWriter,
+)
 from synapse.media.storage_provider import StorageProviderWrapper
 from synapse.media.thumbnailer import Thumbnailer, ThumbnailError
 from synapse.media.url_previewer import UrlPreviewer
@@ -259,7 +264,7 @@ class MediaRepository:
         """
         media = await self.store.get_local_media(media_id)
         if media is None:
-            raise SynapseError(404, "Unknow media ID", errcode=Codes.NOT_FOUND)
+            raise NotFoundError("Unknown media ID")
 
         if media.user_id != auth_user.to_string():
             raise SynapseError(
@@ -300,15 +305,26 @@ class MediaRepository:
             auth_user: The user_id of the uploader
         """
         file_info = FileInfo(server_name=None, file_id=media_id)
-        fname = await self.media_storage.store_file(content, file_info)
+        sha256reader = SHA256TransparentIOReader(content)
+        # The wrapper passes the rest of the IO interface through to the wrapped stream.
+        fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
+        sha256 = sha256reader.hexdigest()
+        should_quarantine = await self.store.get_is_hash_quarantined(sha256)
         logger.info("Stored local media in file %r", fname)
 
+        if should_quarantine:
+            logger.warning(
+                "Media has been automatically quarantined as it matched existing quarantined media"
+            )
+
         await self.store.update_local_media(
             media_id=media_id,
             media_type=media_type,
             upload_name=upload_name,
             media_length=content_length,
             user_id=auth_user,
+            sha256=sha256,
+            quarantined_by="system" if should_quarantine else None,
         )
 
         try:
@@ -341,11 +357,19 @@ class MediaRepository:
         media_id = random_string(24)
 
         file_info = FileInfo(server_name=None, file_id=media_id)
-
-        fname = await self.media_storage.store_file(content, file_info)
+        # The wrapper passes the rest of the IO interface through to the wrapped stream.
+        sha256reader = SHA256TransparentIOReader(content)
+        fname = await self.media_storage.store_file(sha256reader.wrap(), file_info)
+        sha256 = sha256reader.hexdigest()
+        should_quarantine = await self.store.get_is_hash_quarantined(sha256)
 
         logger.info("Stored local media in file %r", fname)
 
+        if should_quarantine:
+            logger.warning(
+                "Media has been automatically quarantined as it matched existing quarantined media"
+            )
+
         await self.store.store_local_media(
             media_id=media_id,
             media_type=media_type,
@@ -353,6 +377,8 @@ class MediaRepository:
             upload_name=upload_name,
             media_length=content_length,
             user_id=auth_user,
+            sha256=sha256,
+            quarantined_by="system" if should_quarantine else None,
         )
 
         try:
@@ -459,6 +485,11 @@ class MediaRepository:
 
         self.mark_recently_accessed(None, media_id)
 
+        # Once we've checked auth we can return early if the media is cached on
+        # the client.
+        if check_for_cached_entry_and_respond(request):
+            return
+
         media_type = media_info.media_type
         if not media_type:
             media_type = "application/octet-stream"
@@ -471,7 +502,7 @@ class MediaRepository:
         responder = await self.media_storage.fetch_media(file_info)
         if federation:
             await respond_with_multipart_responder(
-                self.clock, request, responder, media_info
+                self.clock, request, responder, media_type, media_length, upload_name
             )
         else:
             await respond_with_responder(
@@ -538,6 +569,17 @@ class MediaRepository:
             allow_authenticated,
         )
 
+        # Check if the media is cached on the client, if so return 304. We need
+        # to do this after we have fetched remote media, as we need it to do the
+        # auth.
+        if check_for_cached_entry_and_respond(request):
+            # We always need to use the responder.
+            if responder:
+                with responder:
+                    pass
+
+            return
+
         # We deliberately stream the file outside the lock
         if responder and media_info:
             upload_name = name if name else media_info.upload_name
@@ -739,11 +781,13 @@ class MediaRepository:
         file_info = FileInfo(server_name=server_name, file_id=file_id)
 
         async with self.media_storage.store_into_file(file_info) as (f, fname):
+            sha256writer = SHA256TransparentIOWriter(f)
             try:
                 length, headers = await self.client.download_media(
                     server_name,
                     media_id,
-                    output_stream=f,
+                    # The wrapper passes the rest of the BinaryIO interface through.
+                    output_stream=sha256writer.wrap(),
                     max_size=self.max_upload_size,
                     max_timeout_ms=max_timeout_ms,
                     download_ratelimiter=download_ratelimiter,
@@ -808,6 +852,7 @@ class MediaRepository:
             upload_name=upload_name,
             media_length=length,
             filesystem_id=file_id,
+            sha256=sha256writer.hexdigest(),
         )
 
         logger.info("Stored remote media in file %r", fname)
@@ -828,6 +873,7 @@ class MediaRepository:
             last_access_ts=time_now_ms,
             quarantined_by=None,
             authenticated=authenticated,
+            sha256=sha256writer.hexdigest(),
         )
 
     async def _federation_download_remote_file(
@@ -862,11 +908,13 @@ class MediaRepository:
         file_info = FileInfo(server_name=server_name, file_id=file_id)
 
         async with self.media_storage.store_into_file(file_info) as (f, fname):
+            sha256writer = SHA256TransparentIOWriter(f)
             try:
                 res = await self.client.federation_download_media(
                     server_name,
                     media_id,
-                    output_stream=f,
+                    # The wrapper passes the rest of the BinaryIO interface through.
+                    output_stream=sha256writer.wrap(),
                     max_size=self.max_upload_size,
                     max_timeout_ms=max_timeout_ms,
                     download_ratelimiter=download_ratelimiter,
@@ -937,6 +985,7 @@ class MediaRepository:
             upload_name=upload_name,
             media_length=length,
             filesystem_id=file_id,
+            sha256=sha256writer.hexdigest(),
         )
 
         logger.debug("Stored remote media in file %r", fname)
@@ -957,6 +1006,7 @@ class MediaRepository:
             last_access_ts=time_now_ms,
             quarantined_by=None,
             authenticated=authenticated,
+            sha256=sha256writer.hexdigest(),
         )
 
     def _get_thumbnail_requirements(
@@ -1008,7 +1058,7 @@ class MediaRepository:
         t_method: str,
         t_type: str,
         url_cache: bool,
-    ) -> Optional[str]:
+    ) -> Optional[Tuple[str, FileInfo]]:
        input_path = await self.media_storage.ensure_media_is_in_local_cache(
            FileInfo(None, media_id, url_cache=url_cache)
        )
@@ -1070,7 +1120,7 @@ class MediaRepository:
                 t_len,
             )
 
-            return output_path
+            return output_path, file_info
 
         # Could not generate thumbnail.
         return None
diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py
index 2a106bb0eb..afd33c02a1 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py
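The hunks below add the SHA256TransparentIOWriter/SHA256TransparentIOReader wrappers used throughout this patch. The trick is ordinary delegation: hash whatever passes through read()/write(), and forward everything else with __getattr__ so the wrapper can stand in for the original stream. A self-contained sketch of the same shape, with the quarantine lookup reduced to a set membership check (the real lookup is the get_is_hash_quarantined store call seen in media_repository.py above):

    import hashlib
    import io
    from typing import IO, Any

    class HashingReader:
        # Simplified stand-in for SHA256TransparentIOReader.
        def __init__(self, source: IO) -> None:
            self._source = source
            self._hash = hashlib.sha256()

        def read(self, n: int = -1) -> bytes:
            data = self._source.read(n)
            self._hash.update(data)  # Hash exactly what the caller consumed.
            return data

        def hexdigest(self) -> str:
            return self._hash.hexdigest()

        def __getattr__(self, name: str) -> Any:
            return getattr(self._source, name)  # Delegate everything else.

    quarantined_hashes = {hashlib.sha256(b"known bad payload").hexdigest()}

    reader = HashingReader(io.BytesIO(b"uploaded media bytes"))
    stored = reader.read()  # Storage code reads as normal...
    assert reader.hexdigest() == hashlib.sha256(stored).hexdigest()
    should_quarantine = reader.hexdigest() in quarantined_hashes  # ...the hash is free.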
@@ -19,6 +19,7 @@
 #
 #
 import contextlib
+import hashlib
 import json
 import logging
 import os
@@ -49,15 +50,11 @@ from zope.interface import implementer
 from twisted.internet import interfaces
 from twisted.internet.defer import Deferred
 from twisted.internet.interfaces import IConsumer
-from twisted.protocols.basic import FileSender
 
 from synapse.api.errors import NotFoundError
-from synapse.logging.context import (
-    defer_to_thread,
-    make_deferred_yieldable,
-    run_in_background,
-)
+from synapse.logging.context import defer_to_thread, run_in_background
 from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
+from synapse.media._base import ThreadedFileSender
 from synapse.util import Clock
 from synapse.util.file_consumer import BackgroundFileConsumer
@@ -74,6 +71,88 @@ logger = logging.getLogger(__name__)
 
 CRLF = b"\r\n"
 
+
+class SHA256TransparentIOWriter:
+    """Transparently generates a SHA256 hash of all data written to the source stream.
+
+    Args:
+        source: Source stream.
+    """
+
+    def __init__(self, source: BinaryIO):
+        self._hash = hashlib.sha256()
+        self._source = source
+
+    def write(self, buffer: Union[bytes, bytearray]) -> int:
+        """Wrapper for source.write()
+
+        Args:
+            buffer
+
+        Returns:
+            The value returned by source.write()
+        """
+        res = self._source.write(buffer)
+        self._hash.update(buffer)
+        return res
+
+    def hexdigest(self) -> str:
+        """The digest of the written or read value.
+
+        Returns:
+            The digest in hex format.
+        """
+        return self._hash.hexdigest()
+
+    def wrap(self) -> BinaryIO:
+        # This class implements a subset of the IO interface and passes everything else through via __getattr__
+        return cast(BinaryIO, self)
+
+    # Passthrough any other calls
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._source, attr_name)
+
+
+class SHA256TransparentIOReader:
+    """Transparently generates a SHA256 hash of all data read from the source stream.
+
+    Args:
+        source: Source IO stream.
+    """
+
+    def __init__(self, source: IO):
+        self._hash = hashlib.sha256()
+        self._source = source
+
+    def read(self, n: int = -1) -> bytes:
+        """Wrapper for source.read()
+
+        Args:
+            n
+
+        Returns:
+            The value returned by source.read()
+        """
+        data = self._source.read(n)
+        self._hash.update(data)
+        return data
+
+    def hexdigest(self) -> str:
+        """The digest of the written or read value.
+
+        Returns:
+            The digest in hex format.
+        """
+        return self._hash.hexdigest()
+
+    def wrap(self) -> IO:
+        # This class implements a subset of the IO interface and passes everything else through via __getattr__
+        return cast(IO, self)
+
+    # Passthrough any other calls
+    def __getattr__(self, attr_name: str) -> Any:
+        return getattr(self._source, attr_name)
+
+
 class MediaStorage:
     """Responsible for storing/fetching files from local sources.
 
@@ -111,7 +190,6 @@ class MediaStorage:
         Returns:
             the file path written to in the primary media store
         """
-
         async with self.store_into_file(file_info) as (f, fname):
             # Write to the main media repository
             await self.write_to_file(source, f)
@@ -213,7 +291,7 @@ class MediaStorage:
         local_path = os.path.join(self.local_media_directory, path)
         if os.path.exists(local_path):
             logger.debug("responding with local file %s", local_path)
-            return FileResponder(open(local_path, "rb"))
+            return FileResponder(self.hs, open(local_path, "rb"))
         logger.debug("local file %s did not exist", local_path)
 
         for provider in self.storage_providers:
@@ -336,13 +414,12 @@ class FileResponder(Responder):
         is closed when finished streaming.
""" - def __init__(self, open_file: IO): + def __init__(self, hs: "HomeServer", open_file: BinaryIO): + self.hs = hs self.open_file = open_file def write_to_consumer(self, consumer: IConsumer) -> Deferred: - return make_deferred_yieldable( - FileSender().beginFileTransfer(self.open_file, consumer) - ) + return ThreadedFileSender(self.hs).beginFileTransfer(self.open_file, consumer) def __exit__( self, @@ -549,7 +626,7 @@ class MultipartFileConsumer: Calculate the content length of the multipart response in bytes. """ - if not self.length: + if self.length is None: return None # calculate length of json field and content-type, disposition headers json_field = json.dumps(self.json_field) diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py
index 06e5d27a53..300952025a 100644 --- a/synapse/media/storage_provider.py +++ b/synapse/media/storage_provider.py
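One subtle fix in the media_storage.py hunks above deserves a callout: `content_length()` previously bailed out with `if not self.length`, which treats a legitimate zero-byte upload the same as an unknown length, because `not 0` is true in Python. The new `is None` guard only skips the genuinely unknown case. A tiny illustration of the difference:

    for length in (None, 0, 42):
        # Old guard drops length 0; the new guard only drops the unknown case.
        print(length, "old skips:", not length, "new skips:", length is None)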
@@ -145,6 +145,7 @@ class FileStorageProviderBackend(StorageProvider): def __init__(self, hs: "HomeServer", config: str): self.hs = hs + self.reactor = hs.get_reactor() self.cache_directory = hs.config.media.media_store_path self.base_directory = config @@ -165,7 +166,7 @@ class FileStorageProviderBackend(StorageProvider): shutil_copyfile: Callable[[str, str], str] = shutil.copyfile with start_active_span("shutil_copyfile"): await defer_to_thread( - self.hs.get_reactor(), + self.reactor, shutil_copyfile, primary_fname, backup_fname, @@ -177,7 +178,7 @@ class FileStorageProviderBackend(StorageProvider): backup_fname = os.path.join(self.base_directory, path) if os.path.isfile(backup_fname): - return FileResponder(open(backup_fname, "rb")) + return FileResponder(self.hs, open(backup_fname, "rb")) return None diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py
index ef6aa8ccf5..5d9afda322 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py
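The first thumbnailer.py hunk below pins Image.open to an explicit decoder allow-list, so a hostile upload can only ever reach the JPEG/PNG/WebP/GIF decoders rather than every format Pillow was built with. A minimal sketch of the same guard (the file names are illustrative; the `formats=` keyword needs a reasonably recent Pillow):

    from PIL import Image, UnidentifiedImageError

    ALLOWED_FORMATS = ("jpeg", "png", "webp", "gif")

    try:
        with Image.open("upload.bin", formats=ALLOWED_FORMATS) as image:
            if image.mode == "CMYK":
                # Mirrors the _encode_image change below: PNG cannot encode CMYK.
                image = image.convert("RGB")
            image.thumbnail((320, 240))  # Shrink in place, preserving aspect ratio.
            image.save("thumb.png", "PNG")
    except UnidentifiedImageError:
        # Anything outside the allow-list is rejected before its decoder runs.
        print("not an allowed image format")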
@@ -34,6 +34,7 @@ from synapse.logging.opentracing import trace from synapse.media._base import ( FileInfo, ThumbnailInfo, + check_for_cached_entry_and_respond, respond_404, respond_with_file, respond_with_multipart_responder, @@ -67,6 +68,11 @@ class ThumbnailError(Exception): class Thumbnailer: FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"} + # Which image formats we allow Pillow to open. + # This should intentionally be kept restrictive, because the decoder of any + # format in this list becomes part of our trusted computing base. + PILLOW_FORMATS = ("jpeg", "png", "webp", "gif") + @staticmethod def set_limits(max_image_pixels: int) -> None: Image.MAX_IMAGE_PIXELS = max_image_pixels @@ -76,7 +82,7 @@ class Thumbnailer: self._closed = False try: - self.image = Image.open(input_path) + self.image = Image.open(input_path, formats=self.PILLOW_FORMATS) except OSError as e: # If an error occurs opening the image, a thumbnail won't be able to # be generated. @@ -206,7 +212,7 @@ class Thumbnailer: def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO: output_bytes_io = BytesIO() fmt = self.FORMATS[output_type] - if fmt == "JPEG": + if fmt == "JPEG" or fmt == "PNG" and output_image.mode == "CMYK": output_image = output_image.convert("RGB") output_image.save(output_bytes_io, fmt, quality=80) return output_bytes_io @@ -259,6 +265,7 @@ class ThumbnailProvider: media_storage: MediaStorage, ): self.hs = hs + self.reactor = hs.get_reactor() self.media_repo = media_repo self.media_storage = media_storage self.store = hs.get_datastores().main @@ -288,6 +295,11 @@ class ThumbnailProvider: if media_info.authenticated: raise NotFoundError() + # Once we've checked auth we can return early if the media is cached on + # the client + if check_for_cached_entry_and_respond(request): + return + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) await self._select_and_respond_with_thumbnail( request, @@ -328,6 +340,11 @@ class ThumbnailProvider: if media_info.authenticated: raise NotFoundError() + # Once we've checked auth we can return early if the media is cached on + # the client + if check_for_cached_entry_and_respond(request): + return + thumbnail_infos = await self.store.get_local_media_thumbnails(media_id) for info in thumbnail_infos: t_w = info.width == desired_width @@ -347,7 +364,12 @@ class ThumbnailProvider: if responder: if for_federation: await respond_with_multipart_responder( - self.hs.get_clock(), request, responder, media_info + self.hs.get_clock(), + request, + responder, + info.type, + info.length, + None, ) return else: @@ -359,7 +381,7 @@ class ThumbnailProvider: logger.debug("We don't have a thumbnail of that size. Generating") # Okay, so we generate one. 
- file_path = await self.media_repo.generate_local_exact_thumbnail( + thumbnail_result = await self.media_repo.generate_local_exact_thumbnail( media_id, desired_width, desired_height, @@ -368,16 +390,21 @@ class ThumbnailProvider: url_cache=bool(media_info.url_cache), ) - if file_path: + if thumbnail_result: + file_path, file_info = thumbnail_result + assert file_info.thumbnail is not None + if for_federation: await respond_with_multipart_responder( self.hs.get_clock(), request, - FileResponder(open(file_path, "rb")), - media_info, + FileResponder(self.hs, open(file_path, "rb")), + file_info.thumbnail.type, + file_info.thumbnail.length, + None, ) else: - await respond_with_file(request, desired_type, file_path) + await respond_with_file(self.hs, request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") raise SynapseError(400, "Failed to generate thumbnail.") @@ -415,6 +442,10 @@ class ThumbnailProvider: respond_404(request) return + # Check if the media is cached on the client, if so return 304. + if check_for_cached_entry_and_respond(request): + return + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -455,7 +486,7 @@ class ThumbnailProvider: ) if file_path: - await respond_with_file(request, desired_type, file_path) + await respond_with_file(self.hs, request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") raise SynapseError(400, "Failed to generate thumbnail.") @@ -494,6 +525,10 @@ class ThumbnailProvider: if media_info.authenticated: raise NotFoundError() + # Check if the media is cached on the client, if so return 304. + if check_for_cached_entry_and_respond(request): + return + thumbnail_infos = await self.store.get_remote_media_thumbnails( server_name, media_id ) @@ -579,7 +614,12 @@ class ThumbnailProvider: if for_federation: assert media_info is not None await respond_with_multipart_responder( - self.hs.get_clock(), request, responder, media_info + self.hs.get_clock(), + request, + responder, + file_info.thumbnail.type, + file_info.thumbnail.length, + None, ) return else: @@ -633,7 +673,12 @@ class ThumbnailProvider: if for_federation: assert media_info is not None await respond_with_multipart_responder( - self.hs.get_clock(), request, responder, media_info + self.hs.get_clock(), + request, + responder, + file_info.thumbnail.type, + file_info.thumbnail.length, + None, ) else: await respond_with_responder( diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py
index 2e65a04789..8ef2b3f0c0 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py
@@ -41,7 +41,7 @@ from synapse.api.errors import Codes, SynapseError
 from synapse.http.client import SimpleHttpClient
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.media._base import FileInfo, get_filename_from_headers
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import MediaStorage, SHA256TransparentIOWriter
 from synapse.media.oembed import OEmbedProvider
 from synapse.media.preview_html import decode_body, parse_html_to_open_graph
 from synapse.metrics.background_process_metrics import run_as_background_process
@@ -593,17 +593,26 @@ class UrlPreviewer:
         file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True)
 
         async with self.media_storage.store_into_file(file_info) as (f, fname):
+            sha256writer = SHA256TransparentIOWriter(f)
             if url.startswith("data:"):
                 if not allow_data_urls:
                     raise SynapseError(
                         500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN
                     )
 
-                download_result = await self._parse_data_url(url, f)
+                download_result = await self._parse_data_url(url, sha256writer.wrap())
             else:
-                download_result = await self._download_url(url, f)
+                download_result = await self._download_url(url, sha256writer.wrap())
 
         try:
+            sha256 = sha256writer.hexdigest()
+            should_quarantine = await self.store.get_is_hash_quarantined(sha256)
+
+            if should_quarantine:
+                logger.warning(
+                    "Media has been automatically quarantined as it matched existing quarantined media"
+                )
+
             time_now_ms = self.clock.time_msec()
 
             await self.store.store_local_media(
@@ -614,6 +623,8 @@ class UrlPreviewer:
                 media_length=download_result.length,
                 user_id=user,
                 url_cache=url,
+                sha256=sha256,
+                quarantined_by="system" if should_quarantine else None,
             )
 
         except Exception as e:
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 3051b623d0..9ce83da4ba 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py
@@ -427,16 +427,6 @@ build_info.labels( " ".join([platform.system(), platform.release()]), ).set(1) -# 3PID send info -threepid_send_requests = Histogram( - "synapse_threepid_send_requests_with_tries", - documentation="Number of requests for a 3pid token by try count. Note if" - " there is a request with try count of 4, then there would have been one" - " each for 1, 2 and 3", - buckets=(1, 2, 3, 4, 5, 10), - labelnames=("type", "reason"), -) - threadpool_total_threads = Gauge( "synapse_threadpool_total_threads", "Total number of threads currently in the threadpool", diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
index 19c92b02a0..49d0ff9fc1 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py
@@ -293,7 +293,7 @@ def wrap_as_background_process( """ def wrap_as_background_process_inner( - func: Callable[P, Awaitable[Optional[R]]] + func: Callable[P, Awaitable[Optional[R]]], ) -> Callable[P, "defer.Deferred[Optional[R]]"]: @wraps(func) def wrap_as_background_process_inner_2( diff --git a/synapse/metrics/jemalloc.py b/synapse/metrics/jemalloc.py
index bd25985686..321ff58083 100644 --- a/synapse/metrics/jemalloc.py +++ b/synapse/metrics/jemalloc.py
@@ -23,11 +23,10 @@ import ctypes import logging import os import re -from typing import Iterable, Optional, overload +from typing import Iterable, Literal, Optional, overload import attr from prometheus_client import REGISTRY, Metric -from typing_extensions import Literal from synapse.metrics import GaugeMetricFamily from synapse.metrics._types import Collector diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index f6bfd93d3c..e22d6f3ab7 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py
@@ -18,7 +18,6 @@ # [This file includes modifications made by New Vector Limited] # # -import email.utils import logging from typing import ( TYPE_CHECKING, @@ -45,6 +44,7 @@ from twisted.internet.interfaces import IDelayedCall from twisted.web.resource import Resource from synapse.api import errors +from synapse.api.constants import ProfileFields from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState from synapse.config import ConfigError @@ -89,6 +89,14 @@ from synapse.module_api.callbacks.account_validity_callbacks import ( ON_USER_LOGIN_CALLBACK, ON_USER_REGISTRATION_CALLBACK, ) +from synapse.module_api.callbacks.media_repository_callbacks import ( + GET_MEDIA_CONFIG_FOR_USER_CALLBACK, + IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK, +) +from synapse.module_api.callbacks.ratelimit_callbacks import ( + GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK, + RatelimitOverride, +) from synapse.module_api.callbacks.spamchecker_callbacks import ( CHECK_EVENT_FOR_SPAM_CALLBACK, CHECK_LOGIN_FOR_SPAM_CALLBACK, @@ -101,21 +109,17 @@ from synapse.module_api.callbacks.spamchecker_callbacks import ( USER_MAY_INVITE_CALLBACK, USER_MAY_JOIN_ROOM_CALLBACK, USER_MAY_PUBLISH_ROOM_CALLBACK, - USER_MAY_SEND_3PID_INVITE_CALLBACK, + USER_MAY_SEND_STATE_EVENT_CALLBACK, SpamCheckerModuleApiCallbacks, ) from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( CHECK_CAN_DEACTIVATE_USER_CALLBACK, CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, CHECK_EVENT_ALLOWED_CALLBACK, - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, ON_CREATE_ROOM_CALLBACK, ON_NEW_EVENT_CALLBACK, ON_PROFILE_UPDATE_CALLBACK, - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, - ON_THREEPID_BIND_CALLBACK, ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, ) from synapse.push.httppusher import HttpPusher @@ -188,6 +192,7 @@ __all__ = [ "ProfileInfo", "RoomAlias", "UserProfile", + "RatelimitOverride", ] logger = logging.getLogger(__name__) @@ -260,7 +265,6 @@ class ModuleApi: self._state = hs.get_state_handler() self._clock: Clock = hs.get_clock() self._registration_handler = hs.get_registration_handler() - self._send_email_handler = hs.get_send_email_handler() self._push_rules_handler = hs.get_push_rules_handler() self._pusherpool = hs.get_pusherpool() self._device_handler = hs.get_device_handler() @@ -269,20 +273,6 @@ class ModuleApi: self.msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled self._event_serializer = hs.get_event_client_serializer() - try: - app_name = self._hs.config.email.email_app_name - - self._from_string = self._hs.config.email.email_notif_from % { - "app": app_name - } - except (KeyError, TypeError): - # If substitution failed (which can happen if the string contains - # placeholders other than just "app", or if the type of the placeholder is - # not a string), fall back to the bare strings. - self._from_string = self._hs.config.email.email_notif_from - - self._raw_from = email.utils.parseaddr(self._from_string)[1] - # We expose these as properties below in order to attach a helpful docstring. 
self._http_client: SimpleHttpClient = hs.get_simple_http_client() self._public_room_list_manager = PublicRoomListManager(hs) @@ -304,12 +294,12 @@ class ModuleApi: ] = None, user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None, user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, - user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, user_may_create_room_alias: Optional[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK ] = None, user_may_publish_room: Optional[USER_MAY_PUBLISH_ROOM_CALLBACK] = None, + user_may_send_state_event: Optional[USER_MAY_SEND_STATE_EVENT_CALLBACK] = None, check_username_for_spam: Optional[CHECK_USERNAME_FOR_SPAM_CALLBACK] = None, check_registration_for_spam: Optional[ CHECK_REGISTRATION_FOR_SPAM_CALLBACK @@ -326,7 +316,6 @@ class ModuleApi: should_drop_federated_event=should_drop_federated_event, user_may_join_room=user_may_join_room, user_may_invite=user_may_invite, - user_may_send_3pid_invite=user_may_send_3pid_invite, user_may_create_room=user_may_create_room, user_may_create_room_alias=user_may_create_room_alias, user_may_publish_room=user_may_publish_room, @@ -334,6 +323,7 @@ class ModuleApi: check_registration_for_spam=check_registration_for_spam, check_media_file_for_spam=check_media_file_for_spam, check_login_for_spam=check_login_for_spam, + user_may_send_state_event=user_may_send_state_event, ) def register_account_validity_callbacks( @@ -359,14 +349,41 @@ class ModuleApi: on_legacy_admin_request=on_legacy_admin_request, ) + def register_ratelimit_callbacks( + self, + *, + get_ratelimit_override_for_user: Optional[ + GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK + ] = None, + ) -> None: + """Registers callbacks for ratelimit capabilities. + Added in Synapse v1.132.0. + """ + return self._callbacks.ratelimit.register_callbacks( + get_ratelimit_override_for_user=get_ratelimit_override_for_user, + ) + + def register_media_repository_callbacks( + self, + *, + get_media_config_for_user: Optional[GET_MEDIA_CONFIG_FOR_USER_CALLBACK] = None, + is_user_allowed_to_upload_media_of_size: Optional[ + IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK + ] = None, + ) -> None: + """Registers callbacks for media repository capabilities. + Added in Synapse v1.132.0. + """ + return self._callbacks.media_repository.register_callbacks( + get_media_config_for_user=get_media_config_for_user, + is_user_allowed_to_upload_media_of_size=is_user_allowed_to_upload_media_of_size, + ) + def register_third_party_rules_callbacks( self, *, check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None, on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None, - check_threepid_can_be_invited: Optional[ - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK - ] = None, check_visibility_can_be_modified: Optional[ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, @@ -377,13 +394,6 @@ class ModuleApi: on_user_deactivation_status_changed: Optional[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = None, - on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, - on_add_user_third_party_identifier: Optional[ - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, - on_remove_user_third_party_identifier: Optional[ - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, ) -> None: """Registers callbacks for third party event rules capabilities. 
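register_ratelimit_callbacks and register_media_repository_callbacks above are the module-facing entry points for the two new hook families. A hypothetical module wiring up both (the module class, config handling, and the 10 MiB policy are invented for illustration; RatelimitOverride is exported from synapse.module_api by this patch):

    from typing import Optional

    from synapse.module_api import ModuleApi, RatelimitOverride

    class QuotaModule:
        def __init__(self, config: dict, api: ModuleApi) -> None:
            api.register_media_repository_callbacks(
                is_user_allowed_to_upload_media_of_size=self.check_upload_size,
            )
            api.register_ratelimit_callbacks(
                get_ratelimit_override_for_user=self.ratelimit_override,
            )

        async def check_upload_size(self, user_id: str, size: int) -> bool:
            # Reject anything over 10 MiB, whoever is asking.
            return size <= 10 * 1024 * 1024

        async def ratelimit_override(
            self, user_id: str, limiter_name: str
        ) -> Optional[RatelimitOverride]:
            if user_id == "@trusted-bot:example.com":
                # per_second=0.0 disables the limiter entirely (see the
                # RatelimitOverride docstring in the new callbacks file below).
                return RatelimitOverride(per_second=0.0, burst_count=0)
            return None  # Fall through to the next module / default limits.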
@@ -392,16 +402,12 @@ class ModuleApi: return self._callbacks.third_party_event_rules.register_third_party_rules_callbacks( check_event_allowed=check_event_allowed, on_create_room=on_create_room, - check_threepid_can_be_invited=check_threepid_can_be_invited, check_visibility_can_be_modified=check_visibility_can_be_modified, on_new_event=on_new_event, check_can_shutdown_room=check_can_shutdown_room, check_can_deactivate_user=check_can_deactivate_user, on_profile_update=on_profile_update, on_user_deactivation_status_changed=on_user_deactivation_status_changed, - on_threepid_bind=on_threepid_bind, - on_add_user_third_party_identifier=on_add_user_third_party_identifier, - on_remove_user_third_party_identifier=on_remove_user_third_party_identifier, ) def register_presence_router_callbacks( @@ -561,14 +567,6 @@ class ModuleApi: return self._hs.config.server.public_baseurl @property - def email_app_name(self) -> str: - """The application name configured in the homeserver's configuration. - - Added in Synapse v1.39.0. - """ - return self._hs.config.email.email_app_name - - @property def server_name(self) -> str: """The server name for the local homeserver. @@ -695,23 +693,6 @@ class ModuleApi: user_id = UserID.from_string(f"@{localpart}:{server_name}") return await self._store.get_profileinfo(user_id) - async def get_threepids_for_user(self, user_id: str) -> List[Dict[str, str]]: - """Look up the threepids (email addresses and phone numbers) associated with the - given Matrix user ID. - - Added in Synapse v1.39.0. - - Args: - user_id: The Matrix user ID to look up threepids for. - - Returns: - A list of threepids, each threepid being represented by a dictionary - containing a "medium" key which value is "email" for email addresses and - "msisdn" for phone numbers, and an "address" key which value is the - threepid's address. - """ - return [attr.asdict(t) for t in await self._store.user_get_threepids(user_id)] - def check_user_exists(self, user_id: str) -> "defer.Deferred[Optional[str]]": """Check if user exists. @@ -893,9 +874,9 @@ class ModuleApi: Raises: synapse.api.errors.AuthError: the access token is invalid """ - assert isinstance( - self._device_handler, DeviceHandler - ), "invalidate_access_token can only be called on the main process" + assert isinstance(self._device_handler, DeviceHandler), ( + "invalidate_access_token can only be called on the main process" + ) # see if the access token corresponds to a device user_info = yield defer.ensureDeferred( @@ -1086,7 +1067,10 @@ class ModuleApi: content = {} # Set the profile if not already done by the module. - if "avatar_url" not in content or "displayname" not in content: + if ( + ProfileFields.AVATAR_URL not in content + or ProfileFields.DISPLAYNAME not in content + ): try: # Try to fetch the user's profile. profile = await self._hs.get_profile_handler().get_profile( @@ -1095,8 +1079,8 @@ class ModuleApi: except SynapseError as e: # If the profile couldn't be found, use default values. profile = { - "displayname": target_user_id.localpart, - "avatar_url": None, + ProfileFields.DISPLAYNAME: target_user_id.localpart, + ProfileFields.AVATAR_URL: None, } if e.code != 404: @@ -1109,11 +1093,9 @@ class ModuleApi: ) # Set the profile where it needs to be set. 
- if "avatar_url" not in content: - content["avatar_url"] = profile["avatar_url"] - - if "displayname" not in content: - content["displayname"] = profile["displayname"] + for field_name in [ProfileFields.AVATAR_URL, ProfileFields.DISPLAYNAME]: + if field_name not in content and field_name in profile: + content[field_name] = profile[field_name] event_id, _ = await self._hs.get_room_member_handler().update_membership( requester=requester, @@ -1398,31 +1380,6 @@ class ModuleApi: status[p.device_id] = sent return status - async def send_mail( - self, - recipient: str, - subject: str, - html: str, - text: str, - ) -> None: - """Send an email on behalf of the homeserver. - - Added in Synapse v1.39.0. - - Args: - recipient: The email address for the recipient. - subject: The email's subject. - html: The email's HTML content. - text: The email's text content. - """ - await self._send_email_handler.send_email( - email_address=recipient, - subject=subject, - app_name=self.email_app_name, - html=html, - text=text, - ) - def read_templates( self, filenames: List[str], @@ -1584,30 +1541,6 @@ class ModuleApi: """ await self._registration_handler.check_username(username) - async def store_remote_3pid_association( - self, user_id: str, medium: str, address: str, id_server: str - ) -> None: - """Stores an existing association between a user ID and a third-party identifier. - - The association must already exist on the remote identity server. - - Added in Synapse v1.56.0. - - Args: - user_id: The user ID that's been associated with the 3PID. - medium: The medium of the 3PID (current supported values are "msisdn" and - "email"). - address: The address of the 3PID. - id_server: The identity server the 3PID association has been registered on. - This should only be the domain (or IP address, optionally with the port - number) for the identity server. This will be used to reach out to the - identity server using HTTPS (unless specified otherwise by Synapse's - configuration) when attempting to unbind the third-party identifier. - - - """ - await self._store.add_user_bound_threepid(user_id, medium, address, id_server) - def check_push_rule_actions( self, actions: List[Union[str, Dict[str, str]]] ) -> None: @@ -1844,6 +1777,10 @@ class ModuleApi: deactivation=deactivation, ) + def get_current_time_msec(self) -> int: + """Returns the current server time in milliseconds.""" + return self._clock.time_msec() + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py
index c20d9543fb..16ef7a4b47 100644 --- a/synapse/module_api/callbacks/__init__.py +++ b/synapse/module_api/callbacks/__init__.py
@@ -27,6 +27,12 @@ if TYPE_CHECKING: from synapse.module_api.callbacks.account_validity_callbacks import ( AccountValidityModuleApiCallbacks, ) +from synapse.module_api.callbacks.media_repository_callbacks import ( + MediaRepositoryModuleApiCallbacks, +) +from synapse.module_api.callbacks.ratelimit_callbacks import ( + RatelimitModuleApiCallbacks, +) from synapse.module_api.callbacks.spamchecker_callbacks import ( SpamCheckerModuleApiCallbacks, ) @@ -38,5 +44,7 @@ from synapse.module_api.callbacks.third_party_event_rules_callbacks import ( class ModuleApiCallbacks: def __init__(self, hs: "HomeServer") -> None: self.account_validity = AccountValidityModuleApiCallbacks() + self.media_repository = MediaRepositoryModuleApiCallbacks(hs) + self.ratelimit = RatelimitModuleApiCallbacks(hs) self.spam_checker = SpamCheckerModuleApiCallbacks(hs) self.third_party_event_rules = ThirdPartyEventRulesModuleApiCallbacks(hs) diff --git a/synapse/module_api/callbacks/media_repository_callbacks.py b/synapse/module_api/callbacks/media_repository_callbacks.py new file mode 100644
index 0000000000..6fa80a8eab --- /dev/null +++ b/synapse/module_api/callbacks/media_repository_callbacks.py
@@ -0,0 +1,76 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import logging +from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional + +from synapse.types import JsonDict +from synapse.util.async_helpers import delay_cancellation +from synapse.util.metrics import Measure + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + +GET_MEDIA_CONFIG_FOR_USER_CALLBACK = Callable[[str], Awaitable[Optional[JsonDict]]] + +IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK = Callable[[str, int], Awaitable[bool]] + + +class MediaRepositoryModuleApiCallbacks: + def __init__(self, hs: "HomeServer") -> None: + self.clock = hs.get_clock() + self._get_media_config_for_user_callbacks: List[ + GET_MEDIA_CONFIG_FOR_USER_CALLBACK + ] = [] + self._is_user_allowed_to_upload_media_of_size_callbacks: List[ + IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK + ] = [] + + def register_callbacks( + self, + get_media_config_for_user: Optional[GET_MEDIA_CONFIG_FOR_USER_CALLBACK] = None, + is_user_allowed_to_upload_media_of_size: Optional[ + IS_USER_ALLOWED_TO_UPLOAD_MEDIA_OF_SIZE_CALLBACK + ] = None, + ) -> None: + """Register callbacks from module for each hook.""" + if get_media_config_for_user is not None: + self._get_media_config_for_user_callbacks.append(get_media_config_for_user) + + if is_user_allowed_to_upload_media_of_size is not None: + self._is_user_allowed_to_upload_media_of_size_callbacks.append( + is_user_allowed_to_upload_media_of_size + ) + + async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]: + for callback in self._get_media_config_for_user_callbacks: + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): + res: Optional[JsonDict] = await delay_cancellation(callback(user_id)) + if res: + return res + + return None + + async def is_user_allowed_to_upload_media_of_size( + self, user_id: str, size: int + ) -> bool: + for callback in self._is_user_allowed_to_upload_media_of_size_callbacks: + with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): + res: bool = await delay_cancellation(callback(user_id, size)) + if not res: + return res + + return True diff --git a/synapse/module_api/callbacks/ratelimit_callbacks.py b/synapse/module_api/callbacks/ratelimit_callbacks.py new file mode 100644
index 0000000000..64f9cc81e8 --- /dev/null +++ b/synapse/module_api/callbacks/ratelimit_callbacks.py
@@ -0,0 +1,74 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+import logging
+from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional
+
+import attr
+
+from synapse.util.async_helpers import delay_cancellation
+from synapse.util.metrics import Measure
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(auto_attribs=True)
+class RatelimitOverride:
+    """Represents a ratelimit being overridden."""
+
+    per_second: float
+    """The number of actions that can be performed in a second. `0.0` means that ratelimiting is disabled."""
+    burst_count: int
+    """How many actions can be performed before being limited."""
+
+
+GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK = Callable[
+    [str, str], Awaitable[Optional[RatelimitOverride]]
+]
+
+
+class RatelimitModuleApiCallbacks:
+    def __init__(self, hs: "HomeServer") -> None:
+        self.clock = hs.get_clock()
+        self._get_ratelimit_override_for_user_callbacks: List[
+            GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK
+        ] = []
+
+    def register_callbacks(
+        self,
+        get_ratelimit_override_for_user: Optional[
+            GET_RATELIMIT_OVERRIDE_FOR_USER_CALLBACK
+        ] = None,
+    ) -> None:
+        """Register callbacks from module for each hook."""
+        if get_ratelimit_override_for_user is not None:
+            self._get_ratelimit_override_for_user_callbacks.append(
+                get_ratelimit_override_for_user
+            )
+
+    async def get_ratelimit_override_for_user(
+        self, user_id: str, limiter_name: str
+    ) -> Optional[RatelimitOverride]:
+        for callback in self._get_ratelimit_override_for_user_callbacks:
+            with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
+                res: Optional[RatelimitOverride] = await delay_cancellation(
+                    callback(user_id, limiter_name)
+                )
+                if res:
+                    return res
+
+        return None
diff --git a/synapse/module_api/callbacks/spamchecker_callbacks.py b/synapse/module_api/callbacks/spamchecker_callbacks.py
index 17079ff781..30cab9eb7e 100644 --- a/synapse/module_api/callbacks/spamchecker_callbacks.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py
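The hunks below retire the 3PID-invite hook and introduce user_may_send_state_event. A hypothetical module callback for the new hook (the admin-prefix policy is invented; the return values follow the Literal["NOT_SPAM"] / Codes / (Codes, dict) union defined below):

    from typing import Literal, Tuple, Union

    from synapse.api.errors import Codes
    from synapse.types import JsonDict

    async def user_may_send_state_event(
        user_id: str,
        room_id: str,
        event_type: str,
        state_key: str,
        content: JsonDict,
    ) -> Union[Literal["NOT_SPAM"], Codes, Tuple[Codes, JsonDict]]:
        # Only server admins (by naming convention in this sketch) may touch
        # server ACLs; everything else passes through.
        if event_type == "m.room.server_acl" and not user_id.startswith("@admin-"):
            return Codes.FORBIDDEN
        return "NOT_SPAM"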
@@ -19,8 +19,10 @@ # # +import functools import inspect import logging +from copy import deepcopy from typing import ( TYPE_CHECKING, Any, @@ -28,14 +30,13 @@ from typing import ( Callable, Collection, List, + Literal, Optional, Tuple, Union, + cast, ) -# `Literal` appears with Python 3.8. -from typing_extensions import Literal - import synapse from synapse.api.errors import Codes from synapse.logging.opentracing import trace @@ -104,24 +105,28 @@ USER_MAY_INVITE_CALLBACK = Callable[ ] ], ] -USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[ - [str, str, str, str], - Awaitable[ - Union[ - Literal["NOT_SPAM"], - Codes, - # Highly experimental, not officially part of the spamchecker API, may - # disappear without warning depending on the results of ongoing - # experiments. - # Use this to return additional information as part of an error. - Tuple[Codes, JsonDict], - # Deprecated - bool, - ] +USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE = Union[ + Literal["NOT_SPAM"], + Codes, + # Highly experimental, not officially part of the spamchecker API, may + # disappear without warning depending on the results of ongoing + # experiments. + # Use this to return additional information as part of an error. + Tuple[Codes, JsonDict], + # Deprecated + bool, +] +USER_MAY_CREATE_ROOM_CALLBACK = Union[ + Callable[ + [str, JsonDict], + Awaitable[USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE], + ], + Callable[ # Single argument variant for backwards compatibility + [str], Awaitable[USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE] ], ] -USER_MAY_CREATE_ROOM_CALLBACK = Callable[ - [str], +USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ + [str, RoomAlias], Awaitable[ Union[ Literal["NOT_SPAM"], @@ -136,8 +141,8 @@ USER_MAY_CREATE_ROOM_CALLBACK = Callable[ ] ], ] -USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ - [str, RoomAlias], +USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ + [str, str], Awaitable[ Union[ Literal["NOT_SPAM"], @@ -152,8 +157,8 @@ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ ] ], ] -USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ - [str, str], +USER_MAY_SEND_STATE_EVENT_CALLBACK = Callable[ + [str, str, str, str, JsonDict], Awaitable[ Union[ Literal["NOT_SPAM"], @@ -163,12 +168,13 @@ USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ # experiments. # Use this to return additional information as part of an error. Tuple[Codes, JsonDict], - # Deprecated - bool, ] ], ] -CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]] +CHECK_USERNAME_FOR_SPAM_CALLBACK = Union[ + Callable[[UserProfile], Awaitable[bool]], + Callable[[UserProfile, str], Awaitable[bool]], +] LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ Optional[dict], @@ -293,6 +299,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: "Bad signature for callback check_registration_for_spam", ) + @functools.wraps(wrapped_func) def run(*args: Any, **kwargs: Any) -> Awaitable: # Assertion required because mypy can't prove we won't change `f` # back to `None`. 
See @@ -324,10 +331,10 @@ class SpamCheckerModuleApiCallbacks: ] = [] self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = [] self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = [] - self._user_may_send_3pid_invite_callbacks: List[ - USER_MAY_SEND_3PID_INVITE_CALLBACK - ] = [] self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = [] + self._user_may_send_state_event_callbacks: List[ + USER_MAY_SEND_STATE_EVENT_CALLBACK + ] = [] self._user_may_create_room_alias_callbacks: List[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK ] = [] @@ -351,7 +358,6 @@ class SpamCheckerModuleApiCallbacks: ] = None, user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None, user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, - user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, user_may_create_room_alias: Optional[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK @@ -363,6 +369,7 @@ class SpamCheckerModuleApiCallbacks: ] = None, check_media_file_for_spam: Optional[CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK] = None, check_login_for_spam: Optional[CHECK_LOGIN_FOR_SPAM_CALLBACK] = None, + user_may_send_state_event: Optional[USER_MAY_SEND_STATE_EVENT_CALLBACK] = None, ) -> None: """Register callbacks from module for each hook.""" if check_event_for_spam is not None: @@ -379,14 +386,14 @@ class SpamCheckerModuleApiCallbacks: if user_may_invite is not None: self._user_may_invite_callbacks.append(user_may_invite) - if user_may_send_3pid_invite is not None: - self._user_may_send_3pid_invite_callbacks.append( - user_may_send_3pid_invite, - ) - if user_may_create_room is not None: self._user_may_create_room_callbacks.append(user_may_create_room) + if user_may_send_state_event is not None: + self._user_may_send_state_event_callbacks.append( + user_may_send_state_event, + ) + if user_may_create_room_alias is not None: self._user_may_create_room_alias_callbacks.append( user_may_create_room_alias, @@ -573,29 +580,42 @@ class SpamCheckerModuleApiCallbacks: # No spam-checker has rejected the request, let it pass. return self.NOT_SPAM - async def user_may_send_3pid_invite( - self, inviter_userid: str, medium: str, address: str, room_id: str + async def user_may_create_room( + self, userid: str, room_config: JsonDict ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]: - """Checks if a given user may invite a given threepid into the room - - Note that if the threepid is already associated with a Matrix user ID, Synapse - will call user_may_invite with said user ID instead. + """Checks if a given user may create a room Args: - inviter_userid: The user ID of the sender of the invitation - medium: The 3PID's medium (e.g. "email") - address: The 3PID's address (e.g. "alice@example.com") - room_id: The room ID - - Returns: - NOT_SPAM if the operation is permitted, Codes otherwise. + userid: The ID of the user attempting to create a room + room_config: The room creation configuration which is the body of the /createRoom request """ - for callback in self._user_may_send_3pid_invite_callbacks: + for callback in self._user_may_create_room_callbacks: with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): - res = await delay_cancellation( - callback(inviter_userid, medium, address, room_id) - ) - # Normalize return values to `Codes` or `"NOT_SPAM"`. 
+                checker_args = inspect.signature(callback)
+                # Also ensure backwards compatibility with spam checker callbacks
+                # that don't expect the room_config argument.
+                if len(checker_args.parameters) == 2:
+                    callback_with_room_config = cast(
+                        Callable[
+                            [str, JsonDict],
+                            Awaitable[USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE],
+                        ],
+                        callback,
+                    )
+                    # We make a copy of the config to ensure the spam checker cannot modify it.
+                    res = await delay_cancellation(
+                        callback_with_room_config(userid, deepcopy(room_config))
+                    )
+                else:
+                    callback_without_room_config = cast(
+                        Callable[
+                            [str], Awaitable[USER_MAY_CREATE_ROOM_CALLBACK_RETURN_VALUE]
+                        ],
+                        callback,
+                    )
+                    res = await delay_cancellation(
+                        callback_without_room_config(userid)
+                    )
                 if res is True or res is self.NOT_SPAM:
                     continue
                 elif res is False:
@@ -611,36 +631,38 @@ class SpamCheckerModuleApiCallbacks:
                     return res
                 else:
                     logger.warning(
-                        "Module returned invalid value, rejecting 3pid invite as spam"
+                        "Module returned invalid value, rejecting room creation as spam"
                     )
                     return synapse.api.errors.Codes.FORBIDDEN, {}
 
         return self.NOT_SPAM
 
-    async def user_may_create_room(
-        self, userid: str
+    async def user_may_send_state_event(
+        self,
+        user_id: str,
+        room_id: str,
+        event_type: str,
+        state_key: str,
+        content: JsonDict,
     ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
-        """Checks if a given user may create a room
-
+        """Checks if a given user may send a given state event
         Args:
-            userid: The ID of the user attempting to create a room
+            user_id: The ID of the user attempting to send the state event
+            room_id: The ID of the room that the event will be sent to
+            event_type: The type of the state event
+            state_key: The state key of the state event
+            content: The content of the state event
         """
-        for callback in self._user_may_create_room_callbacks:
+        for callback in self._user_may_send_state_event_callbacks:
             with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"):
-                res = await delay_cancellation(callback(userid))
-                if res is True or res is self.NOT_SPAM:
+                # We make a copy of the content to ensure that the spam checker cannot modify it.
+                res = await delay_cancellation(
+                    callback(user_id, room_id, event_type, state_key, deepcopy(content))
+                )
+                if res is self.NOT_SPAM:
                     continue
-                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN, {}
                 elif isinstance(res, synapse.api.errors.Codes):
                     return res, {}
-                elif (
-                    isinstance(res, tuple)
-                    and len(res) == 2
-                    and isinstance(res[0], synapse.api.errors.Codes)
-                    and isinstance(res[1], dict)
-                ):
-                    return res
                 else:
                     logger.warning(
                         "Module returned invalid value, rejecting room creation as spam"
                     )
@@ -716,7 +738,9 @@ class SpamCheckerModuleApiCallbacks:
 
         return self.NOT_SPAM
 
-    async def check_username_for_spam(self, user_profile: UserProfile) -> bool:
+    async def check_username_for_spam(
+        self, user_profile: UserProfile, requester_id: str
+    ) -> bool:
         """Checks if a user ID or display name are considered "spammy" by this server.
 
         If the server considers a username spammy, then it will not be included in
@@ -727,15 +751,33 @@ class SpamCheckerModuleApiCallbacks:
                 * user_id
                 * display_name
                 * avatar_url
+            requester_id: The user ID of the user making the user directory search request.
 
         Returns:
             True if the user is spammy.
""" for callback in self._check_username_for_spam_callbacks: with Measure(self.clock, f"{callback.__module__}.{callback.__qualname__}"): + checker_args = inspect.signature(callback) # Make a copy of the user profile object to ensure the spam checker cannot # modify it. - res = await delay_cancellation(callback(user_profile.copy())) + # Also ensure backwards compatibility with spam checker callbacks + # that don't expect the requester_id argument. + if len(checker_args.parameters) == 2: + callback_with_requester_id = cast( + Callable[[UserProfile, str], Awaitable[bool]], callback + ) + res = await delay_cancellation( + callback_with_requester_id(user_profile.copy(), requester_id) + ) + else: + callback_without_requester_id = cast( + Callable[[UserProfile], Awaitable[bool]], callback + ) + res = await delay_cancellation( + callback_without_requester_id(user_profile.copy()) + ) + if res: return True @@ -755,8 +797,8 @@ class SpamCheckerModuleApiCallbacks: username: The request user name, if any request_info: List of tuples of user agent and IP that were used during the registration process. - auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml", - "cas". If any. Note this does not include users registered + auth_provider_id: The SSO IdP the user used, e.g "oidc". + If any. Note this does not include users registered via a password provider. Returns: @@ -844,8 +886,8 @@ class SpamCheckerModuleApiCallbacks: user_id: The request user ID request_info: List of tuples of user agent and IP that were used during the registration process. - auth_provider_id: The SSO IdP the user used, e.g "oidc", "saml", - "cas". If any. Note this does not include users registered + auth_provider_id: The SSO IdP the user used, e.g "oidc". + If any. Note this does not include users registered via a password provider. Returns: diff --git a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
index 9f7a04372d..13508cc582 100644 --- a/synapse/module_api/callbacks/third_party_event_rules_callbacks.py +++ b/synapse/module_api/callbacks/third_party_event_rules_callbacks.py
@@ -40,9 +40,6 @@ CHECK_EVENT_ALLOWED_CALLBACK = Callable[ [EventBase, StateMap[EventBase]], Awaitable[Tuple[bool, Optional[dict]]] ] ON_CREATE_ROOM_CALLBACK = Callable[[Requester, dict, bool], Awaitable] -CHECK_THREEPID_CAN_BE_INVITED_CALLBACK = Callable[ - [str, str, StateMap[EventBase]], Awaitable[bool] -] CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK = Callable[ [str, StateMap[EventBase], str], Awaitable[bool] ] @@ -51,9 +48,6 @@ CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[Optional[str], str], Awaitable[bool CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]] ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] -ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable] -ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable] -ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable] def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: @@ -73,7 +67,6 @@ def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: third_party_event_rules_methods = { "check_event_allowed", "on_create_room", - "check_threepid_can_be_invited", "check_visibility_can_be_modified", } @@ -161,9 +154,6 @@ class ThirdPartyEventRulesModuleApiCallbacks: self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = [] self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = [] - self._check_threepid_can_be_invited_callbacks: List[ - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK - ] = [] self._check_visibility_can_be_modified_callbacks: List[ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = [] @@ -178,21 +168,11 @@ class ThirdPartyEventRulesModuleApiCallbacks: self._on_user_deactivation_status_changed_callbacks: List[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = [] - self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = [] - self._on_add_user_third_party_identifier_callbacks: List[ - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = [] - self._on_remove_user_third_party_identifier_callbacks: List[ - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = [] def register_third_party_rules_callbacks( self, check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None, on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None, - check_threepid_can_be_invited: Optional[ - CHECK_THREEPID_CAN_BE_INVITED_CALLBACK - ] = None, check_visibility_can_be_modified: Optional[ CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, @@ -202,14 +182,7 @@ class ThirdPartyEventRulesModuleApiCallbacks: on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, on_user_deactivation_status_changed: Optional[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK - ] = None, - on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, - on_add_user_third_party_identifier: Optional[ - ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, - on_remove_user_third_party_identifier: Optional[ - ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK - ] = None, + ] = None ) -> None: """Register callbacks from modules for each hook.""" if check_event_allowed is not None: @@ -218,11 +191,6 @@ class ThirdPartyEventRulesModuleApiCallbacks: if on_create_room is not None: self._on_create_room_callbacks.append(on_create_room) - if check_threepid_can_be_invited is not None: - self._check_threepid_can_be_invited_callbacks.append( - check_threepid_can_be_invited, - ) - if check_visibility_can_be_modified is not None: 
self._check_visibility_can_be_modified_callbacks.append( check_visibility_can_be_modified, @@ -236,6 +204,7 @@ class ThirdPartyEventRulesModuleApiCallbacks: if check_can_deactivate_user is not None: self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user) + if on_profile_update is not None: self._on_profile_update_callbacks.append(on_profile_update) @@ -244,19 +213,6 @@ class ThirdPartyEventRulesModuleApiCallbacks: on_user_deactivation_status_changed, ) - if on_threepid_bind is not None: - self._on_threepid_bind_callbacks.append(on_threepid_bind) - - if on_add_user_third_party_identifier is not None: - self._on_add_user_third_party_identifier_callbacks.append( - on_add_user_third_party_identifier - ) - - if on_remove_user_third_party_identifier is not None: - self._on_remove_user_third_party_identifier_callbacks.append( - on_remove_user_third_party_identifier - ) - async def check_event_allowed( self, event: EventBase, @@ -349,39 +305,6 @@ class ThirdPartyEventRulesModuleApiCallbacks: raise e - async def check_threepid_can_be_invited( - self, medium: str, address: str, room_id: str - ) -> bool: - """Check if a provided 3PID can be invited in the given room. - - Args: - medium: The 3PID's medium. - address: The 3PID's address. - room_id: The room we want to invite the threepid to. - - Returns: - True if the 3PID can be invited, False if not. - """ - # Bail out early without hitting the store if we don't have any callbacks to run. - if len(self._check_threepid_can_be_invited_callbacks) == 0: - return True - - state_events = await self._storage_controllers.state.get_current_state(room_id) - - for callback in self._check_threepid_can_be_invited_callbacks: - try: - threepid_can_be_invited = await delay_cancellation( - callback(medium, address, state_events) - ) - if threepid_can_be_invited is False: - return False - except CancelledError: - raise - except Exception as e: - logger.warning("Failed to run module API callback %s: %s", callback, e) - - return True - async def check_visibility_can_be_modified( self, room_id: str, new_visibility: str ) -> bool: @@ -533,67 +456,3 @@ class ThirdPartyEventRulesModuleApiCallbacks: logger.exception( "Failed to run module API callback %s: %s", callback, e ) - - async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> None: - """Called after a threepid association has been verified and stored. - - Note that this callback is called when an association is created on the - local homeserver, not when it's created on an identity server (and then kept track - of so that it can be unbound on the same IS later on). - - THIS MODULE CALLBACK METHOD HAS BEEN DEPRECATED. Please use the - `on_add_user_third_party_identifier` callback method instead. - - Args: - user_id: the user being associated with the threepid. - medium: the threepid's medium. - address: the threepid's address. - """ - for callback in self._on_threepid_bind_callbacks: - try: - await callback(user_id, medium, address) - except Exception as e: - logger.exception( - "Failed to run module API callback %s: %s", callback, e - ) - - async def on_add_user_third_party_identifier( - self, user_id: str, medium: str, address: str - ) -> None: - """Called when an association between a user's Matrix ID and a third-party ID - (email, phone number) has successfully been registered on the homeserver. - - Args: - user_id: The User ID included in the association. - medium: The medium of the third-party ID (email, msisdn). - address: The address of the third-party ID (i.e. 
an email address). - """ - for callback in self._on_add_user_third_party_identifier_callbacks: - try: - await callback(user_id, medium, address) - except Exception as e: - logger.exception( - "Failed to run module API callback %s: %s", callback, e - ) - - async def on_remove_user_third_party_identifier( - self, user_id: str, medium: str, address: str - ) -> None: - """Called when an association between a user's Matrix ID and a third-party ID - (email, phone number) has been successfully removed on the homeserver. - - This is called *after* any known bindings on identity servers for this - association have been removed. - - Args: - user_id: The User ID included in the removed association. - medium: The medium of the third-party ID (email, msisdn). - address: The address of the third-party ID (i.e. an email address). - """ - for callback in self._on_remove_user_third_party_identifier_callbacks: - try: - await callback(user_id, medium, address) - except Exception as e: - logger.exception( - "Failed to run module API callback %s: %s", callback, e - ) diff --git a/synapse/notifier.py b/synapse/notifier.py
index 7a2b54036c..6190432b87 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
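The hunks that follow replace the notifier's per-stream ObservableDeferred (and the _NotificationListener wrapper around it) with a bare set of Deferreds per user stream: a wake-up swaps the set out, fires every deferred under one PreserveLoggingContext, and timed-out waiters remove themselves via a canceller. A minimal standalone sketch of that pattern, with an integer standing in for StreamToken; the class and names here are illustrative, not Synapse's _NotifierUserStream:

from typing import Set

from twisted.internet import defer
from twisted.internet.defer import Deferred


class StreamWaiters:
    """Illustrative miniature of the listener-set pattern used below."""

    def __init__(self, current_token: int) -> None:
        self.current_token = current_token
        self.listeners: Set["Deferred[int]"] = set()

    def new_listener(self, token: int) -> "Deferred[int]":
        # If the caller's token is already stale, wake it immediately.
        if token != self.current_token:
            return defer.succeed(self.current_token)
        # Otherwise park a deferred; the canceller discards it from the
        # set so timed-out waiters don't leak.
        d: "Deferred[int]" = Deferred(canceller=lambda d: self.listeners.discard(d))
        self.listeners.add(d)
        return d

    def advance(self, new_token: int) -> None:
        # Swap the set out before firing so callbacks can re-register.
        self.current_token = new_token
        listeners, self.listeners = self.listeners, set()
        for d in listeners:
            d.callback(new_token)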
@@ -41,6 +41,7 @@ import attr from prometheus_client import Counter from twisted.internet import defer +from twisted.internet.defer import Deferred from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, Membership from synapse.api.errors import AuthError @@ -52,6 +53,7 @@ from synapse.logging.opentracing import log_kv, start_active_span from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( + ISynapseReactor, JsonDict, MultiWriterStreamToken, PersistedEventPosition, @@ -61,8 +63,10 @@ from synapse.types import ( StreamToken, UserID, ) -from synapse.util.async_helpers import ObservableDeferred, timeout_deferred -from synapse.util.metrics import Measure +from synapse.util.async_helpers import ( + timeout_deferred, +) +from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -89,18 +93,6 @@ def count(func: Callable[[T], bool], it: Iterable[T]) -> int: return n -class _NotificationListener: - """This represents a single client connection to the events stream. - The events stream handler will have yielded to the deferred, so to - notify the handler it is sufficient to resolve the deferred. - """ - - __slots__ = ["deferred"] - - def __init__(self, deferred: "defer.Deferred"): - self.deferred = deferred - - class _NotifierUserStream: """This represents a user connected to the event stream. It tracks the most recent stream token for that user. @@ -113,59 +105,49 @@ class _NotifierUserStream: def __init__( self, + reactor: ISynapseReactor, user_id: str, rooms: StrCollection, current_token: StreamToken, time_now_ms: int, ): + self.reactor = reactor self.user_id = user_id self.rooms = set(rooms) - self.current_token = current_token # The last token for which we should wake up any streams that have a # token that comes before it. This gets updated every time we get poked. # We start it at the current token since if we get any streams # that have a token from before we have no idea whether they should be # woken up or not, so lets just wake them up. - self.last_notified_token = current_token + self.current_token = current_token self.last_notified_ms = time_now_ms - self.notify_deferred: ObservableDeferred[StreamToken] = ObservableDeferred( - defer.Deferred() - ) + # Set of listeners that we need to wake up when there has been a change. + self.listeners: Set[Deferred[StreamToken]] = set() - def notify( + def update_and_fetch_deferreds( self, - stream_key: StreamKeyType, - stream_id: Union[int, RoomStreamToken, MultiWriterStreamToken], + current_token: StreamToken, time_now_ms: int, - ) -> None: - """Notify any listeners for this user of a new event from an - event source. + ) -> Collection["Deferred[StreamToken]"]: + """Update the stream for this user because of a new event from an + event source, and return the set of deferreds to wake up. + Args: - stream_key: The stream the event came from. - stream_id: The new id for the stream the event came from. + current_token: The new current token. time_now_ms: The current time in milliseconds. + + Returns: + The set of deferreds that need to be called. 
""" - self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) - self.last_notified_token = self.current_token + self.current_token = current_token self.last_notified_ms = time_now_ms - notify_deferred = self.notify_deferred - - log_kv( - { - "notify": self.user_id, - "stream": stream_key, - "stream_id": stream_id, - "listeners": self.count_listeners(), - } - ) - users_woken_by_stream_counter.labels(stream_key).inc() + listeners = self.listeners + self.listeners = set() - with PreserveLoggingContext(): - self.notify_deferred = ObservableDeferred(defer.Deferred()) - notify_deferred.callback(self.current_token) + return listeners def remove(self, notifier: "Notifier") -> None: """Remove this listener from all the indexes in the Notifier @@ -176,12 +158,15 @@ class _NotifierUserStream: lst = notifier.room_to_user_streams.get(room, set()) lst.discard(self) + if not lst: + notifier.room_to_user_streams.pop(room, None) + notifier.user_to_user_stream.pop(self.user_id) def count_listeners(self) -> int: - return len(self.notify_deferred.observers()) + return len(self.listeners) - def new_listener(self, token: StreamToken) -> _NotificationListener: + def new_listener(self, token: StreamToken) -> "Deferred[StreamToken]": """Returns a deferred that is resolved when there is a new token greater than the given token. @@ -191,10 +176,17 @@ class _NotifierUserStream: """ # Immediately wake up stream if something has already since happened # since their last token. - if self.last_notified_token != token: - return _NotificationListener(defer.succeed(self.current_token)) - else: - return _NotificationListener(self.notify_deferred.observe()) + if token != self.current_token: + return defer.succeed(self.current_token) + + # Create a new deferred and add it to the set of listeners. We add a + # cancel handler to remove it from the set again, to handle timeouts. + deferred: "Deferred[StreamToken]" = Deferred( + canceller=lambda d: self.listeners.discard(d) + ) + self.listeners.add(deferred) + + return deferred @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -247,6 +239,7 @@ class Notifier: # List of callbacks to be notified when a lock is released self._lock_released_callback: List[Callable[[str, str, str], None]] = [] + self.reactor = hs.get_reactor() self.clock = hs.get_clock() self.appservice_handler = hs.get_application_service_handler() self._pusher_pool = hs.get_pusherpool() @@ -342,14 +335,25 @@ class Notifier: # Wake up all related user stream notifiers user_streams = self.room_to_user_streams.get(room_id, set()) time_now_ms = self.clock.time_msec() + current_token = self.event_sources.get_current_token() + + listeners: List["Deferred[StreamToken]"] = [] for user_stream in user_streams: try: - user_stream.notify( - StreamKeyType.UN_PARTIAL_STATED_ROOMS, new_token, time_now_ms + listeners.extend( + user_stream.update_and_fetch_deferreds(current_token, time_now_ms) ) except Exception: logger.exception("Failed to notify listener") + with PreserveLoggingContext(): + for listener in listeners: + listener.callback(current_token) + + users_woken_by_stream_counter.labels(StreamKeyType.UN_PARTIAL_STATED_ROOMS).inc( + len(user_streams) + ) + # Poke the replication so that other workers also see the write to # the un-partial-stated rooms stream. 
self.notify_replication() @@ -518,16 +522,22 @@ class Notifier: users = users or [] rooms = rooms or [] - with Measure(self.clock, "on_new_event"): - user_streams = set() + user_streams: Set[_NotifierUserStream] = set() - log_kv( - { - "waking_up_explicit_users": len(users), - "waking_up_explicit_rooms": len(rooms), - } - ) + log_kv( + { + "waking_up_explicit_users": len(users), + "waking_up_explicit_rooms": len(rooms), + "users": shortstr(users), + "rooms": shortstr(rooms), + "stream": stream_key, + "stream_id": new_token, + } + ) + # Only calculate which user streams to wake up if there are, in fact, + # any user streams registered. + if self.user_to_user_stream or self.room_to_user_streams: for user in users: user_stream = self.user_to_user_stream.get(str(user)) if user_stream is not None: @@ -544,25 +554,40 @@ class Notifier: ) time_now_ms = self.clock.time_msec() + current_token = self.event_sources.get_current_token() + listeners: List["Deferred[StreamToken]"] = [] for user_stream in user_streams: try: - user_stream.notify(stream_key, new_token, time_now_ms) + listeners.extend( + user_stream.update_and_fetch_deferreds( + current_token, time_now_ms + ) + ) except Exception: logger.exception("Failed to notify listener") - self.notify_replication() + # We resolve all these deferreds in one go so that we only need to + # call `PreserveLoggingContext` once, as it has a bunch of overhead + # (to calculate performance stats) + if listeners: + with PreserveLoggingContext(): + for listener in listeners: + listener.callback(current_token) - # Notify appservices. - try: - self.appservice_handler.notify_interested_services_ephemeral( - stream_key, - new_token, - users, - ) - except Exception: - logger.exception( - "Error notifying application services of ephemeral events" - ) + if user_streams: + users_woken_by_stream_counter.labels(stream_key).inc(len(user_streams)) + + self.notify_replication() + + # Notify appservices. + try: + self.appservice_handler.notify_interested_services_ephemeral( + stream_key, + new_token, + users, + ) + except Exception: + logger.exception("Error notifying application services of ephemeral events") def on_new_replication_data(self) -> None: """Used to inform replication listeners that something has happened @@ -586,6 +611,7 @@ class Notifier: if room_ids is None: room_ids = await self.store.get_rooms_for_user(user_id) user_stream = _NotifierUserStream( + reactor=self.reactor, user_id=user_id, rooms=room_ids, current_token=current_token, @@ -608,8 +634,8 @@ class Notifier: # Now we wait for the _NotifierUserStream to be told there # is a new token. listener = user_stream.new_listener(prev_token) - listener.deferred = timeout_deferred( - listener.deferred, + listener = timeout_deferred( + listener, (end_time - now) / 1000.0, self.hs.get_reactor(), ) @@ -622,7 +648,7 @@ class Notifier: ) with PreserveLoggingContext(): - await listener.deferred + await listener log_kv( { diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 34ab637c3d..8249d5e84f 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
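The first hunk below makes invite notifications respect the invitee's invite configuration: if get_invite_rule(event.sender) returns anything other than InviteRule.ALLOW, the evaluator returns no actions at all. A self-contained sketch of that gate; InviteRule.ALLOW comes from the diff, while the BLOCK/IGNORE members and the InviteConfig stand-in (for the store.get_invite_config_for_user result) are assumptions:

from enum import Enum
from typing import Set


class InviteRule(Enum):
    ALLOW = "allow"    # the only member the diff shows
    BLOCK = "block"    # assumed, per the "blocked or ignored" comment
    IGNORE = "ignore"  # assumed, per the "blocked or ignored" comment


class InviteConfig:
    """Stand-in for the result of store.get_invite_config_for_user()."""

    def __init__(self, blocked_senders: Set[str]) -> None:
        self.blocked_senders = blocked_senders

    def get_invite_rule(self, sender: str) -> InviteRule:
        if sender in self.blocked_senders:
            return InviteRule.BLOCK
        return InviteRule.ALLOW


def should_evaluate_invite(config: InviteConfig, sender: str) -> bool:
    # Anything other than ALLOW means the invite was blocked or ignored,
    # so no push rules run and nobody is notified.
    return config.get_invite_rule(sender) == InviteRule.ALLOW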
@@ -52,6 +52,7 @@ from synapse.events.snapshot import EventContext from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership +from synapse.storage.invite_rule import InviteRule from synapse.storage.roommember import ProfileInfo from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator from synapse.types import JsonValue @@ -191,9 +192,17 @@ class BulkPushRuleEvaluator: # if this event is an invite event, we may need to run rules for the user # who's been invited, otherwise they won't get told they've been invited - if event.type == EventTypes.Member and event.membership == Membership.INVITE: + if ( + event.is_state() + and event.type == EventTypes.Member + and event.membership == Membership.INVITE + ): invited = event.state_key - if invited and self.hs.is_mine_id(invited) and invited not in local_users: + invite_config = await self.store.get_invite_config_for_user(invited) + if invite_config.get_invite_rule(event.sender) != InviteRule.ALLOW: + # Invite was blocked or ignored, never notify. + return {} + if self.hs.is_mine_id(invited) and invited not in local_users: local_users.append(invited) if not local_users: @@ -304,9 +313,9 @@ class BulkPushRuleEvaluator: if relation_type == "m.thread" and event.content.get( "m.relates_to", {} ).get("is_falling_back", False): - related_events["m.in_reply_to"][ - "im.vector.is_falling_back" - ] = "" + related_events["m.in_reply_to"]["im.vector.is_falling_back"] = ( + "" + ) return related_events @@ -371,8 +380,9 @@ class BulkPushRuleEvaluator: "Deferred[Tuple[int, Tuple[dict, Optional[int]], Dict[str, Dict[str, JsonValue]], Mapping[str, ProfileInfo]]]", gather_results( ( - run_in_background( # type: ignore[call-arg] - self.store.get_number_joined_users_in_room, event.room_id # type: ignore[arg-type] + run_in_background( # type: ignore[call-overload] + self.store.get_number_joined_users_in_room, + event.room_id, # type: ignore[arg-type] ), run_in_background( self._get_power_levels_and_sender_level, @@ -381,10 +391,10 @@ class BulkPushRuleEvaluator: event_id_to_event, ), run_in_background(self._related_events, event), - run_in_background( # type: ignore[call-arg] + run_in_background( # type: ignore[call-overload] self.store.get_subset_users_in_room_with_profiles, - event.room_id, # type: ignore[arg-type] - rules_by_user.keys(), # type: ignore[arg-type] + event.room_id, + rules_by_user.keys(), ), ), consumeErrors=True, @@ -435,6 +445,7 @@ class BulkPushRuleEvaluator: self._related_event_match_enabled, event.room_version.msc3931_push_features, self.hs.config.experimental.msc1767_enabled, # MSC3931 flag + self.hs.config.experimental.msc4210_enabled, ) for uid, rules in rules_by_user.items(): diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py deleted file mode 100644
index 0a14c534f7..0000000000
--- a/synapse/push/emailpusher.py
+++ /dev/null
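The file deleted below implemented per-room exponential throttling of notification emails. For reference, its core update rule reduces to the function sketched here; the three constants are copied verbatim from the removed module, and base_ms stands in for the configured notif_delay_before_mail_ms:

THROTTLE_MAX_MS = 24 * 60 * 60 * 1000  # cap: one email per room per 24h
THROTTLE_MULTIPLIER = 144  # 10 mins, then jump straight to 24 hours
THROTTLE_RESET_AFTER_MS = 12 * 60 * 60 * 1000  # a 12h quiet gap resets


def next_throttle_ms(current_ms: int, gap_ms: int, base_ms: int) -> int:
    """Compute a room's new throttle after an email has been sent.

    gap_ms is the time between the event triggering this notification
    and the previous notifying event.
    """
    if gap_ms > THROTTLE_RESET_AFTER_MS:
        # The room went quiet long enough: release the throttle.
        return base_ms
    if current_ms == 0:
        return base_ms
    # Otherwise back off, up to the 24-hour ceiling.
    return min(current_ms * THROTTLE_MULTIPLIER, THROTTLE_MAX_MS)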
@@ -1,331 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import logging -from typing import TYPE_CHECKING, Dict, List, Optional - -from twisted.internet.error import AlreadyCalled, AlreadyCancelled -from twisted.internet.interfaces import IDelayedCall - -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams -from synapse.push.mailer import Mailer -from synapse.push.push_types import EmailReason -from synapse.storage.databases.main.event_push_actions import EmailPushAction -from synapse.util.threepids import validate_email - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - -# THROTTLE is the minimum time between mail notifications sent for a given room. -# Each room maintains its own throttle counter, but each new mail notification -# sends the pending notifications for all rooms. -THROTTLE_MAX_MS = 24 * 60 * 60 * 1000 # 24h -# THROTTLE_MULTIPLIER = 6 # 10 mins, 1 hour, 6 hours, 24 hours -THROTTLE_MULTIPLIER = 144 # 10 mins, 24 hours - i.e. jump straight to 1 day - -# If no event triggers a notification for this long after the previous, -# the throttle is released. -# 12 hours - a gap of 12 hours in conversation is surely enough to merit a new -# notification when things get going again... -THROTTLE_RESET_AFTER_MS = 12 * 60 * 60 * 1000 - -# does each email include all unread notifs, or just the ones which have happened -# since the last mail? -# XXX: this is currently broken as it includes ones from parted rooms(!) -INCLUDE_ALL_UNREAD_NOTIFS = False - - -class EmailPusher(Pusher): - """ - A pusher that sends email notifications about events (approximately) - when they happen. - This shares quite a bit of code with httpusher: it would be good to - factor out the common parts - """ - - def __init__(self, hs: "HomeServer", pusher_config: PusherConfig, mailer: Mailer): - super().__init__(hs, pusher_config) - self.mailer = mailer - - self.store = self.hs.get_datastores().main - self.email = pusher_config.pushkey - self.timed_call: Optional[IDelayedCall] = None - self.throttle_params: Dict[str, ThrottleParams] = {} - self._inited = False - - self._is_processing = False - - # Make sure that the email is valid. - try: - validate_email(self.email) - except ValueError: - raise PusherConfigException("Invalid email") - - self._delay_before_mail_ms = self.hs.config.email.notif_delay_before_mail_ms - - def on_started(self, should_check_for_notifs: bool) -> None: - """Called when this pusher has been started. - - Args: - should_check_for_notifs: Whether we should immediately - check for push to send. 
Set to False only if it's known there - is nothing to send - """ - if should_check_for_notifs and self.mailer is not None: - self._start_processing() - - def on_stop(self) -> None: - if self.timed_call: - try: - self.timed_call.cancel() - except (AlreadyCalled, AlreadyCancelled): - pass - self.timed_call = None - - def on_new_receipts(self) -> None: - # We could wake up and cancel the timer but there tend to be quite a - # lot of read receipts so it's probably less work to just let the - # timer fire - pass - - def on_timer(self) -> None: - self.timed_call = None - self._start_processing() - - def _start_processing(self) -> None: - if self._is_processing: - return - - run_as_background_process("emailpush.process", self._process) - - def _pause_processing(self) -> None: - """Used by tests to temporarily pause processing of events. - - Asserts that its not currently processing. - """ - assert not self._is_processing - self._is_processing = True - - def _resume_processing(self) -> None: - """Used by tests to resume processing of events after pausing.""" - assert self._is_processing - self._is_processing = False - self._start_processing() - - async def _process(self) -> None: - # we should never get here if we are already processing - assert not self._is_processing - - try: - self._is_processing = True - - if not self._inited: - # this is our first loop: load up the throttle params - assert self.pusher_id is not None - self.throttle_params = await self.store.get_throttle_params_by_room( - self.pusher_id - ) - self._inited = True - - # if the max ordering changes while we're running _unsafe_process, - # call it again, and so on until we've caught up. - while True: - starting_max_ordering = self.max_stream_ordering - try: - await self._unsafe_process() - except Exception: - logger.exception("Exception processing notifs") - if self.max_stream_ordering == starting_max_ordering: - break - finally: - self._is_processing = False - - async def _unsafe_process(self) -> None: - """ - Main logic of the push loop without the wrapper function that sets - up logging, measures and guards against multiple instances of it - being run. - """ - start = 0 if INCLUDE_ALL_UNREAD_NOTIFS else self.last_stream_ordering - unprocessed = ( - await self.store.get_unread_push_actions_for_user_in_range_for_email( - self.user_id, start, self.max_stream_ordering - ) - ) - - soonest_due_at: Optional[int] = None - - if not unprocessed: - await self.save_last_stream_ordering_and_success(self.max_stream_ordering) - return - - for push_action in unprocessed: - received_at = push_action.received_ts - if received_at is None: - received_at = 0 - notif_ready_at = received_at + self._delay_before_mail_ms - - room_ready_at = self.room_ready_to_notify_at(push_action.room_id) - - should_notify_at = max(notif_ready_at, room_ready_at) - - if should_notify_at <= self.clock.time_msec(): - # one of our notifications is ready for sending, so we send - # *one* email updating the user on their notifications, - # we then consider all previously outstanding notifications - # to be delivered. 
- - reason: EmailReason = { - "room_id": push_action.room_id, - "now": self.clock.time_msec(), - "received_at": received_at, - "delay_before_mail_ms": self._delay_before_mail_ms, - "last_sent_ts": self.get_room_last_sent_ts(push_action.room_id), - "throttle_ms": self.get_room_throttle_ms(push_action.room_id), - } - - await self.send_notification(unprocessed, reason) - - await self.save_last_stream_ordering_and_success( - max(ea.stream_ordering for ea in unprocessed) - ) - - # we update the throttle on all the possible unprocessed push actions - for ea in unprocessed: - await self.sent_notif_update_throttle(ea.room_id, ea) - break - else: - if soonest_due_at is None or should_notify_at < soonest_due_at: - soonest_due_at = should_notify_at - - if self.timed_call is not None: - try: - self.timed_call.cancel() - except (AlreadyCalled, AlreadyCancelled): - pass - self.timed_call = None - - if soonest_due_at is not None: - self.timed_call = self.hs.get_reactor().callLater( - self.seconds_until(soonest_due_at), self.on_timer - ) - - async def save_last_stream_ordering_and_success( - self, last_stream_ordering: int - ) -> None: - self.last_stream_ordering = last_stream_ordering - pusher_still_exists = ( - await self.store.update_pusher_last_stream_ordering_and_success( - self.app_id, - self.email, - self.user_id, - last_stream_ordering, - self.clock.time_msec(), - ) - ) - if not pusher_still_exists: - # The pusher has been deleted while we were processing, so - # lets just stop and return. - self.on_stop() - - def seconds_until(self, ts_msec: int) -> float: - secs = (ts_msec - self.clock.time_msec()) / 1000 - return max(secs, 0) - - def get_room_throttle_ms(self, room_id: str) -> int: - if room_id in self.throttle_params: - return self.throttle_params[room_id].throttle_ms - else: - return 0 - - def get_room_last_sent_ts(self, room_id: str) -> int: - if room_id in self.throttle_params: - return self.throttle_params[room_id].last_sent_ts - else: - return 0 - - def room_ready_to_notify_at(self, room_id: str) -> int: - """ - Determines whether throttling should prevent us from sending an email - for the given room - - Returns: - The timestamp when we are next allowed to send an email notif - for this room - """ - last_sent_ts = self.get_room_last_sent_ts(room_id) - throttle_ms = self.get_room_throttle_ms(room_id) - - may_send_at = last_sent_ts + throttle_ms - return may_send_at - - async def sent_notif_update_throttle( - self, room_id: str, notified_push_action: EmailPushAction - ) -> None: - # We have sent a notification, so update the throttle accordingly. - # If the event that triggered the notif happened more than - # THROTTLE_RESET_AFTER_MS after the previous one that triggered a - # notif, we release the throttle. Otherwise, the throttle is increased. 
- time_of_previous_notifs = await self.store.get_time_of_last_push_action_before( - notified_push_action.stream_ordering - ) - - time_of_this_notifs = notified_push_action.received_ts - - if time_of_previous_notifs is not None and time_of_this_notifs is not None: - gap = time_of_this_notifs - time_of_previous_notifs - else: - # if we don't know the arrival time of one of the notifs (it was not - # stored prior to email notification code) then assume a gap of - # zero which will just not reset the throttle - gap = 0 - - current_throttle_ms = self.get_room_throttle_ms(room_id) - - if gap > THROTTLE_RESET_AFTER_MS: - new_throttle_ms = self._delay_before_mail_ms - else: - if current_throttle_ms == 0: - new_throttle_ms = self._delay_before_mail_ms - else: - new_throttle_ms = min( - current_throttle_ms * THROTTLE_MULTIPLIER, THROTTLE_MAX_MS - ) - self.throttle_params[room_id] = ThrottleParams( - self.clock.time_msec(), - new_throttle_ms, - ) - assert self.pusher_id is not None - await self.store.set_throttle_params( - self.pusher_id, room_id, self.throttle_params[room_id] - ) - - async def send_notification( - self, push_actions: List[EmailPushAction], reason: EmailReason - ) -> None: - logger.info("Sending notif email for user %r", self.user_id) - - await self.mailer.send_notification_mail( - self.app_id, self.user_id, self.email, push_actions, reason - ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index dd9b64d6ef..7df8a128c9 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
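The changes below implement the MSC4076 badge-count opt-out: when the experimental flag is enabled and the pusher's data contains org.matrix.msc4076.disable_badge_count, the counts block is left out of the push payload entirely. A sketch of the resulting payload construction for the event_id_only format; the standalone function is illustrative, but the field names all come from the diff:

from typing import Any, Dict


def build_event_id_only_payload(
    event_id: str,
    room_id: str,
    priority: str,
    badge: int,
    disable_badge_count: bool,
) -> Dict[str, Any]:
    content: Dict[str, Any] = {
        "event_id": event_id,
        "room_id": room_id,
        "prio": priority,
    }
    if not disable_badge_count:
        # Badge counts remain the default; they are only omitted when
        # MSC4076 is enabled and the push gateway opted out.
        content["counts"] = {"unread": badge}
    return content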
@@ -127,6 +127,11 @@ class HttpPusher(Pusher): if self.data is None: raise PusherConfigException("'data' key can not be null for HTTP pusher") + # Check if badge counts should be disabled for this push gateway + self.disable_badge_count = self.hs.config.experimental.msc4076_enabled and bool( + self.data.get("org.matrix.msc4076.disable_badge_count", False) + ) + self.name = "%s/%s/%s" % ( pusher_config.user_name, pusher_config.app_id, @@ -200,6 +205,12 @@ class HttpPusher(Pusher): if self._is_processing: return + # Check if we are trying, but failing, to contact the pusher. If so, we + # don't try and start processing immediately and instead wait for the + # retry loop to try again later (which is controlled by the timer). + if self.failing_since and self.timed_call and self.timed_call.active(): + return + run_as_background_process("httppush.process", self._process) async def _process(self) -> None: @@ -461,9 +472,10 @@ class HttpPusher(Pusher): content: JsonDict = { "event_id": event.event_id, "room_id": event.room_id, - "counts": {"unread": badge}, "prio": priority, } + if not self.disable_badge_count: + content["counts"] = {"unread": badge} # event_id_only doesn't include the tweaks, so override them. tweaks = {} else: @@ -478,11 +490,11 @@ class HttpPusher(Pusher): "type": event.type, "sender": event.user_id, "prio": priority, - "counts": { - "unread": badge, - # 'missed_calls': 2 - }, } + if not self.disable_badge_count: + content["counts"] = { + "unread": badge, + } if event.type == "m.room.member" and event.is_state(): content["membership"] = event.content["membership"] content["user_is_target"] = event.state_key == self.user_id diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py deleted file mode 100644
index cf611bd90b..0000000000
--- a/synapse/push/mailer.py
+++ /dev/null
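Among the code deleted below is the mailer's HTML sanitisation, which followed the standard bleach pattern: clean against an allow-list, then linkify bare URLs. A trimmed, runnable extract for reference; ALLOWED_TAGS and ALLOWED_ATTRS are shortened from the removed lists:

import bleach
from markupsafe import Markup

ALLOWED_TAGS = {"b", "i", "em", "strong", "a", "code", "pre", "p", "br"}
ALLOWED_ATTRS = {"font": ["color"], "a": ["href", "name", "target"]}


def safe_markup(raw_html: str) -> Markup:
    # clean() strips disallowed tags and attributes; linkify() then wraps
    # bare URLs in <a> tags. The result is safe to render in a template.
    return Markup(
        bleach.linkify(
            bleach.clean(
                raw_html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRS, strip=True
            )
        )
    )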
@@ -1,1003 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import logging -import urllib.parse -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, TypeVar - -import bleach -import jinja2 -from markupsafe import Markup -from prometheus_client import Counter - -from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes -from synapse.api.errors import StoreError -from synapse.config.emailconfig import EmailSubjectConfig -from synapse.events import EventBase -from synapse.push.presentable_names import ( - calculate_room_name, - descriptor_from_member_events, - name_from_member_event, -) -from synapse.push.push_types import ( - EmailReason, - MessageVars, - NotifVars, - RoomVars, - TemplateVars, -) -from synapse.storage.databases.main.event_push_actions import EmailPushAction -from synapse.types import StateMap, UserID -from synapse.types.state import StateFilter -from synapse.util.async_helpers import concurrently_execute -from synapse.visibility import filter_events_for_client - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - -T = TypeVar("T") - -emails_sent_counter = Counter( - "synapse_emails_sent_total", - "Emails sent by type", - ["type"], -) - - -CONTEXT_BEFORE = 1 -CONTEXT_AFTER = 1 - -# From https://github.com/matrix-org/matrix-react-sdk/blob/master/src/HtmlUtils.js -ALLOWED_TAGS = [ - "font", # custom to matrix for IRC-style font coloring - "del", # for markdown - # deliberately no h1/h2 to stop people shouting. 
- "h3", - "h4", - "h5", - "h6", - "blockquote", - "p", - "a", - "ul", - "ol", - "nl", - "li", - "b", - "i", - "u", - "strong", - "em", - "strike", - "code", - "hr", - "br", - "div", - "table", - "thead", - "caption", - "tbody", - "tr", - "th", - "td", - "pre", -] -ALLOWED_ATTRS = { - # custom ones first: - "font": ["color"], # custom to matrix - "a": ["href", "name", "target"], # remote target: custom to matrix - # We don't currently allow img itself by default, but this - # would make sense if we did - "img": ["src"], -} -# When bleach release a version with this option, we can specify schemes -# ALLOWED_SCHEMES = ["http", "https", "ftp", "mailto"] - - -class Mailer: - def __init__( - self, - hs: "HomeServer", - app_name: str, - template_html: jinja2.Template, - template_text: jinja2.Template, - ): - self.hs = hs - self.template_html = template_html - self.template_text = template_text - - self.send_email_handler = hs.get_send_email_handler() - self.store = self.hs.get_datastores().main - self._state_storage_controller = self.hs.get_storage_controllers().state - self.macaroon_gen = self.hs.get_macaroon_generator() - self.state_handler = self.hs.get_state_handler() - self._storage_controllers = hs.get_storage_controllers() - self.app_name = app_name - self.email_subjects: EmailSubjectConfig = hs.config.email.email_subjects - - logger.info("Created Mailer for app_name %s" % app_name) - - emails_sent_counter.labels("password_reset") - - async def send_password_reset_mail( - self, email_address: str, token: str, client_secret: str, sid: str - ) -> None: - """Send an email with a password reset link to a user - - Args: - email_address: Email address we're sending the password - reset to - token: Unique token generated by the server to verify - the email was received - client_secret: Unique token generated by the client to - group together multiple email sending attempts - sid: The generated session ID - """ - params = {"token": token, "client_secret": client_secret, "sid": sid} - link = ( - self.hs.config.server.public_baseurl - + "_synapse/client/password_reset/email/submit_token?%s" - % urllib.parse.urlencode(params) - ) - - template_vars: TemplateVars = {"link": link} - - emails_sent_counter.labels("password_reset").inc() - - await self.send_email( - email_address, - self.email_subjects.password_reset - % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, - template_vars, - ) - - emails_sent_counter.labels("registration") - - async def send_registration_mail( - self, email_address: str, token: str, client_secret: str, sid: str - ) -> None: - """Send an email with a registration confirmation link to a user - - Args: - email_address: Email address we're sending the registration - link to - token: Unique token generated by the server to verify - the email was received - client_secret: Unique token generated by the client to - group together multiple email sending attempts - sid: The generated session ID - """ - params = {"token": token, "client_secret": client_secret, "sid": sid} - link = ( - self.hs.config.server.public_baseurl - + "_matrix/client/unstable/registration/email/submit_token?%s" - % urllib.parse.urlencode(params) - ) - - template_vars: TemplateVars = {"link": link} - - emails_sent_counter.labels("registration").inc() - - await self.send_email( - email_address, - self.email_subjects.email_validation - % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, - template_vars, - ) - - emails_sent_counter.labels("already_in_use") - - async def 
send_already_in_use_mail(self, email_address: str) -> None: - """Send an email if the address is already bound to an user account - - Args: - email_address: Email address we're sending to the "already in use" mail - """ - - await self.send_email( - email_address, - self.email_subjects.email_already_in_use - % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, - {}, - ) - - emails_sent_counter.labels("add_threepid") - - async def send_add_threepid_mail( - self, email_address: str, token: str, client_secret: str, sid: str - ) -> None: - """Send an email with a validation link to a user for adding a 3pid to their account - - Args: - email_address: Email address we're sending the validation link to - - token: Unique token generated by the server to verify the email was received - - client_secret: Unique token generated by the client to group together - multiple email sending attempts - - sid: The generated session ID - """ - params = {"token": token, "client_secret": client_secret, "sid": sid} - link = ( - self.hs.config.server.public_baseurl - + "_matrix/client/unstable/add_threepid/email/submit_token?%s" - % urllib.parse.urlencode(params) - ) - - template_vars: TemplateVars = {"link": link} - - emails_sent_counter.labels("add_threepid").inc() - - await self.send_email( - email_address, - self.email_subjects.email_validation - % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, - template_vars, - ) - - emails_sent_counter.labels("notification") - - async def send_notification_mail( - self, - app_id: str, - user_id: str, - email_address: str, - push_actions: Iterable[EmailPushAction], - reason: EmailReason, - ) -> None: - """ - Send email regarding a user's room notifications - - Params: - app_id: The application receiving the notification. - user_id: The user receiving the notification. - email_address: The email address receiving the notification. - push_actions: All outstanding notifications. - reason: The notification that was ready and is the cause of an email - being sent. - """ - rooms_in_order = deduped_ordered_list([pa.room_id for pa in push_actions]) - - notif_events = await self.store.get_events([pa.event_id for pa in push_actions]) - - notifs_by_room: Dict[str, List[EmailPushAction]] = {} - for pa in push_actions: - notifs_by_room.setdefault(pa.room_id, []).append(pa) - - # collect the current state for all the rooms in which we have - # notifications - state_by_room = {} - - try: - user_display_name = await self.store.get_profile_displayname( - UserID.from_string(user_id) - ) - if user_display_name is None: - user_display_name = user_id - except StoreError: - user_display_name = user_id - - async def _fetch_room_state(room_id: str) -> None: - room_state = await self._state_storage_controller.get_current_state_ids( - room_id - ) - state_by_room[room_id] = room_state - - # Run at most 3 of these at once: sync does 10 at a time but email - # notifs are much less realtime than sync so we can afford to wait a bit. 
- await concurrently_execute(_fetch_room_state, rooms_in_order, 3) - - # actually sort our so-called rooms_in_order list, most recent room first - rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1].received_ts or 0)) - - rooms: List[RoomVars] = [] - - for r in rooms_in_order: - roomvars = await self._get_room_vars( - r, user_id, notifs_by_room[r], notif_events, state_by_room[r] - ) - rooms.append(roomvars) - - reason["room_name"] = await calculate_room_name( - self.store, - state_by_room[reason["room_id"]], - user_id, - fallback_to_members=True, - ) - - if len(notifs_by_room) == 1: - # Only one room has new stuff - room_id = list(notifs_by_room.keys())[0] - - summary_text = await self._make_summary_text_single_room( - room_id, - notifs_by_room[room_id], - state_by_room[room_id], - notif_events, - user_id, - ) - else: - summary_text = await self._make_summary_text( - notifs_by_room, state_by_room, notif_events, reason - ) - - unsubscribe_link = self._make_unsubscribe_link(user_id, app_id, email_address) - - template_vars: TemplateVars = { - "user_display_name": user_display_name, - "unsubscribe_link": unsubscribe_link, - "summary_text": summary_text, - "rooms": rooms, - "reason": reason, - } - - emails_sent_counter.labels("notification").inc() - - await self.send_email( - email_address, summary_text, template_vars, unsubscribe_link - ) - - async def send_email( - self, - email_address: str, - subject: str, - extra_template_vars: TemplateVars, - unsubscribe_link: Optional[str] = None, - ) -> None: - """Send an email with the given information and template text""" - template_vars: TemplateVars = { - "app_name": self.app_name, - "server_name": self.hs.config.server.server_name, - } - - template_vars.update(extra_template_vars) - - html_text = self.template_html.render(**template_vars) - plain_text = self.template_text.render(**template_vars) - - await self.send_email_handler.send_email( - email_address=email_address, - subject=subject, - app_name=self.app_name, - html=html_text, - text=plain_text, - # Include the List-Unsubscribe header which some clients render in the UI. - # Per RFC 2369, this can be a URL or mailto URL. See - # https://www.rfc-editor.org/rfc/rfc2369.html#section-3.2 - # - # It is preferred to use email, but Synapse doesn't support incoming email. - # - # Also include the List-Unsubscribe-Post header from RFC 8058. See - # https://www.rfc-editor.org/rfc/rfc8058.html#section-3.1 - # - # Note that many email clients will not render the unsubscribe link - # unless DKIM, etc. is properly setup. - additional_headers=( - { - "List-Unsubscribe-Post": "List-Unsubscribe=One-Click", - "List-Unsubscribe": f"<{unsubscribe_link}>", - } - if unsubscribe_link - else None - ), - ) - - async def _get_room_vars( - self, - room_id: str, - user_id: str, - notifs: Iterable[EmailPushAction], - notif_events: Dict[str, EventBase], - room_state_ids: StateMap[str], - ) -> RoomVars: - """ - Generate the variables for notifications on a per-room basis. - - Args: - room_id: The room ID - user_id: The user receiving the notification. - notifs: The outstanding push actions for this room. - notif_events: The events related to the above notifications. - room_state_ids: The event IDs of the current room state. - - Returns: - A dictionary to be added to the template context. - """ - - # Check if one of the notifs is an invite event for the user. 
- is_invite = False - for n in notifs: - ev = notif_events[n.event_id] - if ev.type == EventTypes.Member and ev.state_key == user_id: - if ev.content.get("membership") == Membership.INVITE: - is_invite = True - break - - room_name = await calculate_room_name(self.store, room_state_ids, user_id) - - room_vars: RoomVars = { - "title": room_name, - "hash": string_ordinal_total(room_id), # See sender avatar hash - "notifs": [], - "invite": is_invite, - "link": self._make_room_link(room_id), - "avatar_url": await self._get_room_avatar(room_state_ids), - } - - if not is_invite: - for n in notifs: - notifvars = await self._get_notif_vars( - n, user_id, notif_events[n.event_id], room_state_ids - ) - - # merge overlapping notifs together. - # relies on the notifs being in chronological order. - merge = False - if room_vars["notifs"] and "messages" in room_vars["notifs"][-1]: - prev_messages = room_vars["notifs"][-1]["messages"] - for message in notifvars["messages"]: - pm = list( - filter(lambda pm: pm["id"] == message["id"], prev_messages) - ) - if pm: - if not message["is_historical"]: - pm[0]["is_historical"] = False - merge = True - elif merge: - # we're merging, so append any remaining messages - # in this notif to the previous one - prev_messages.append(message) - - if not merge: - room_vars["notifs"].append(notifvars) - - return room_vars - - async def _get_room_avatar( - self, - room_state_ids: StateMap[str], - ) -> Optional[str]: - """ - Retrieve the avatar url for this room---if it exists. - - Args: - room_state_ids: The event IDs of the current room state. - - Returns: - room's avatar url if it's present and a string; otherwise None. - """ - event_id = room_state_ids.get((EventTypes.RoomAvatar, "")) - if event_id: - ev = await self.store.get_event(event_id) - url = ev.content.get("url") - if isinstance(url, str): - return url - return None - - async def _get_notif_vars( - self, - notif: EmailPushAction, - user_id: str, - notif_event: EventBase, - room_state_ids: StateMap[str], - ) -> NotifVars: - """ - Generate the variables for a single notification. - - Args: - notif: The outstanding notification for this room. - user_id: The user receiving the notification. - notif_event: The event related to the above notification. - room_state_ids: The event IDs of the current room state. - - Returns: - A dictionary to be added to the template context. - """ - - results = await self.store.get_events_around( - notif.room_id, - notif.event_id, - before_limit=CONTEXT_BEFORE, - after_limit=CONTEXT_AFTER, - ) - - ret: NotifVars = { - "link": self._make_notif_link(notif), - "ts": notif.received_ts, - "messages": [], - } - - the_events = await filter_events_for_client( - self._storage_controllers, - user_id, - results.events_before, - ) - the_events.append(notif_event) - - for event in the_events: - messagevars = await self._get_message_vars(notif, event, room_state_ids) - if messagevars is not None: - ret["messages"].append(messagevars) - - return ret - - async def _get_message_vars( - self, notif: EmailPushAction, event: EventBase, room_state_ids: StateMap[str] - ) -> Optional[MessageVars]: - """ - Generate the variables for a single event, if possible. - - Args: - notif: The outstanding notification for this room. - event: The event under consideration. - room_state_ids: The event IDs of the current room state. - - Returns: - A dictionary to be added to the template context, or None if the - event cannot be processed. 
- """ - if event.type != EventTypes.Message and event.type != EventTypes.Encrypted: - return None - - # Get the sender's name and avatar from the room state. - type_state_key = ("m.room.member", event.sender) - sender_state_event_id = room_state_ids.get(type_state_key) - if sender_state_event_id: - sender_state_event: Optional[EventBase] = await self.store.get_event( - sender_state_event_id - ) - else: - # Attempt to check the historical state for the room. - historical_state = await self._state_storage_controller.get_state_for_event( - event.event_id, StateFilter.from_types((type_state_key,)) - ) - sender_state_event = historical_state.get(type_state_key) - - if sender_state_event: - sender_name = name_from_member_event(sender_state_event) - sender_avatar_url: Optional[str] = sender_state_event.content.get( - "avatar_url" - ) - else: - # No state could be found, fallback to the MXID. - sender_name = event.sender - sender_avatar_url = None - - # 'hash' for deterministically picking default images: use - # sender_hash % the number of default images to choose from - sender_hash = string_ordinal_total(event.sender) - - ret: MessageVars = { - "event_type": event.type, - "is_historical": event.event_id != notif.event_id, - "id": event.event_id, - "ts": event.origin_server_ts, - "sender_name": sender_name, - "sender_avatar_url": sender_avatar_url, - "sender_hash": sender_hash, - } - - # Encrypted messages don't have any additional useful information. - if event.type == EventTypes.Encrypted: - return ret - - msgtype = event.content.get("msgtype") - if not isinstance(msgtype, str): - msgtype = None - - ret["msgtype"] = msgtype - - if msgtype == "m.text": - self._add_text_message_vars(ret, event) - elif msgtype == "m.image": - self._add_image_message_vars(ret, event) - - if "body" in event.content: - ret["body_text_plain"] = event.content["body"] - - return ret - - def _add_text_message_vars( - self, messagevars: MessageVars, event: EventBase - ) -> None: - """ - Potentially add a sanitised message body to the message variables. - - Args: - messagevars: The template context to be modified. - event: The event under consideration. - """ - msgformat = event.content.get("format") - if not isinstance(msgformat, str): - msgformat = None - - formatted_body = event.content.get("formatted_body") - body = event.content.get("body") - - if msgformat == "org.matrix.custom.html" and formatted_body: - messagevars["body_text_html"] = safe_markup(formatted_body) - elif body: - messagevars["body_text_html"] = safe_text(body) - - def _add_image_message_vars( - self, messagevars: MessageVars, event: EventBase - ) -> None: - """ - Potentially add an image URL to the message variables. - - Args: - messagevars: The template context to be modified. - event: The event under consideration. - """ - if "url" in event.content: - messagevars["image_url"] = event.content["url"] - - async def _make_summary_text_single_room( - self, - room_id: str, - notifs: List[EmailPushAction], - room_state_ids: StateMap[str], - notif_events: Dict[str, EventBase], - user_id: str, - ) -> str: - """ - Make a summary text for the email when only a single room has notifications. - - Args: - room_id: The ID of the room. - notifs: The push actions for this room. - room_state_ids: The state map for the room. - notif_events: A map of event ID -> notification event. - user_id: The user receiving the notification. - - Returns: - The summary text. 
- """ - # If the room has some kind of name, use it, but we don't - # want the generated-from-names one here otherwise we'll - # end up with, "new message from Bob in the Bob room" - room_name = await calculate_room_name( - self.store, room_state_ids, user_id, fallback_to_members=False - ) - - # See if one of the notifs is an invite event for the user - invite_event = None - for n in notifs: - ev = notif_events[n.event_id] - if ev.type == EventTypes.Member and ev.state_key == user_id: - if ev.content.get("membership") == Membership.INVITE: - invite_event = ev - break - - if invite_event: - inviter_member_event_id = room_state_ids.get( - ("m.room.member", invite_event.sender) - ) - inviter_name = invite_event.sender - if inviter_member_event_id: - inviter_member_event = await self.store.get_event( - inviter_member_event_id, allow_none=True - ) - if inviter_member_event: - inviter_name = name_from_member_event(inviter_member_event) - - if room_name is None: - return self.email_subjects.invite_from_person % { - "person": inviter_name, - "app": self.app_name, - } - - # If the room is a space, it gets a slightly different topic. - create_event_id = room_state_ids.get(("m.room.create", "")) - if create_event_id: - create_event = await self.store.get_event( - create_event_id, allow_none=True - ) - if ( - create_event - and create_event.content.get(EventContentFields.ROOM_TYPE) - == RoomTypes.SPACE - ): - return self.email_subjects.invite_from_person_to_space % { - "person": inviter_name, - "space": room_name, - "app": self.app_name, - } - - return self.email_subjects.invite_from_person_to_room % { - "person": inviter_name, - "room": room_name, - "app": self.app_name, - } - - if len(notifs) == 1: - # There is just the one notification, so give some detail - sender_name = None - event = notif_events[notifs[0].event_id] - if ("m.room.member", event.sender) in room_state_ids: - state_event_id = room_state_ids[("m.room.member", event.sender)] - state_event = await self.store.get_event(state_event_id) - sender_name = name_from_member_event(state_event) - - if sender_name is not None and room_name is not None: - return self.email_subjects.message_from_person_in_room % { - "person": sender_name, - "room": room_name, - "app": self.app_name, - } - elif sender_name is not None: - return self.email_subjects.message_from_person % { - "person": sender_name, - "app": self.app_name, - } - - # The sender is unknown, just use the room name (or ID). - return self.email_subjects.messages_in_room % { - "room": room_name or room_id, - "app": self.app_name, - } - else: - # There's more than one notification for this room, so just - # say there are several - if room_name is not None: - return self.email_subjects.messages_in_room % { - "room": room_name, - "app": self.app_name, - } - - return await self._make_summary_text_from_member_events( - room_id, notifs, room_state_ids, notif_events - ) - - async def _make_summary_text( - self, - notifs_by_room: Dict[str, List[EmailPushAction]], - room_state_ids: Dict[str, StateMap[str]], - notif_events: Dict[str, EventBase], - reason: EmailReason, - ) -> str: - """ - Make a summary text for the email when multiple rooms have notifications. - - Args: - notifs_by_room: A map of room ID to the push actions for that room. - room_state_ids: A map of room ID to the state map for that room. - notif_events: A map of event ID -> notification event. - reason: The reason this notification is being sent. - - Returns: - The summary text. 
- """ - # Stuff's happened in multiple different rooms - # ...but we still refer to the 'reason' room which triggered the mail - if reason["room_name"] is not None: - return self.email_subjects.messages_in_room_and_others % { - "room": reason["room_name"], - "app": self.app_name, - } - - room_id = reason["room_id"] - return await self._make_summary_text_from_member_events( - room_id, notifs_by_room[room_id], room_state_ids[room_id], notif_events - ) - - async def _make_summary_text_from_member_events( - self, - room_id: str, - notifs: List[EmailPushAction], - room_state_ids: StateMap[str], - notif_events: Dict[str, EventBase], - ) -> str: - """ - Make a summary text for the email when only a single room has notifications. - - Args: - room_id: The ID of the room. - notifs: The push actions for this room. - room_state_ids: The state map for the room. - notif_events: A map of event ID -> notification event. - - Returns: - The summary text. - """ - # If the room doesn't have a name, say who the messages - # are from explicitly to avoid, "messages in the Bob room" - - # Find the latest event ID for each sender, note that the notifications - # are already in descending received_ts. - sender_ids = {} - for n in notifs: - sender = notif_events[n.event_id].sender - if sender not in sender_ids: - sender_ids[sender] = n.event_id - - # Get the actual member events (in order to calculate a pretty name for - # the room). - member_event_ids = [] - member_events = {} - for sender_id, event_id in sender_ids.items(): - type_state_key = ("m.room.member", sender_id) - sender_state_event_id = room_state_ids.get(type_state_key) - if sender_state_event_id: - member_event_ids.append(sender_state_event_id) - else: - # Attempt to check the historical state for the room. - historical_state = ( - await self._state_storage_controller.get_state_for_event( - event_id, StateFilter.from_types((type_state_key,)) - ) - ) - sender_state_event = historical_state.get(type_state_key) - if sender_state_event: - member_events[event_id] = sender_state_event - member_events.update(await self.store.get_events(member_event_ids)) - - if not member_events: - # No member events were found! Maybe the room is empty? - # Fallback to the room ID (note that if there was a room name this - # would already have been used previously). - return self.email_subjects.messages_in_room % { - "room": room_id, - "app": self.app_name, - } - - # There was a single sender. - if len(member_events) == 1: - return self.email_subjects.messages_from_person % { - "person": descriptor_from_member_events(member_events.values()), - "app": self.app_name, - } - - # There was more than one sender, use the first one and a tweaked template. - return self.email_subjects.messages_from_person_and_others % { - "person": descriptor_from_member_events(list(member_events.values())[:1]), - "app": self.app_name, - } - - def _make_room_link(self, room_id: str) -> str: - """ - Generate a link to open a room in the web client. - - Args: - room_id: The room ID to generate a link to. - - Returns: - A link to open a room in the web client. 
- """ - if self.hs.config.email.email_riot_base_url: - base_url = "%s/#/room" % (self.hs.config.email.email_riot_base_url) - elif self.app_name == "Vector": - # need /beta for Universal Links to work on iOS - base_url = "https://vector.im/beta/#/room" - else: - base_url = "https://matrix.to/#" - return "%s/%s" % (base_url, room_id) - - def _make_notif_link(self, notif: EmailPushAction) -> str: - """ - Generate a link to open an event in the web client. - - Args: - notif: The notification to generate a link for. - - Returns: - A link to open the notification in the web client. - """ - if self.hs.config.email.email_riot_base_url: - return "%s/#/room/%s/%s" % ( - self.hs.config.email.email_riot_base_url, - notif.room_id, - notif.event_id, - ) - elif self.app_name == "Vector": - # need /beta for Universal Links to work on iOS - return "https://vector.im/beta/#/room/%s/%s" % ( - notif.room_id, - notif.event_id, - ) - else: - return "https://matrix.to/#/%s/%s" % (notif.room_id, notif.event_id) - - def _make_unsubscribe_link( - self, user_id: str, app_id: str, email_address: str - ) -> str: - """ - Generate a link to unsubscribe from email notifications. - - Args: - user_id: The user receiving the notification. - app_id: The application receiving the notification. - email_address: The email address receiving the notification. - - Returns: - A link to unsubscribe from email notifications. - """ - params = { - "access_token": self.macaroon_gen.generate_delete_pusher_token( - user_id, app_id, email_address - ), - "app_id": app_id, - "pushkey": email_address, - } - - return "%s_synapse/client/unsubscribe?%s" % ( - self.hs.config.server.public_baseurl, - urllib.parse.urlencode(params), - ) - - -def safe_markup(raw_html: str) -> Markup: - """ - Sanitise a raw HTML string to a set of allowed tags and attributes, and linkify any bare URLs. - - Args - raw_html: Unsafe HTML. - - Returns: - A Markup object ready to safely use in a Jinja template. - """ - return Markup( - bleach.linkify( - bleach.clean( - raw_html, - tags=ALLOWED_TAGS, - attributes=ALLOWED_ATTRS, - # bleach master has this, but it isn't released yet - # protocols=ALLOWED_SCHEMES, - strip=True, - ) - ) - ) - - -def safe_text(raw_text: str) -> Markup: - """ - Sanitise text (escape any HTML tags), and then linkify any bare URLs. - - Args - raw_text: Unsafe text which might include HTML markup. - - Returns: - A Markup object ready to safely use in a Jinja template. - """ - return Markup( - bleach.linkify(bleach.clean(raw_text, tags=[], attributes=[], strip=False)) - ) - - -def deduped_ordered_list(it: Iterable[T]) -> List[T]: - seen = set() - ret = [] - for item in it: - if item not in seen: - seen.add(item) - ret.append(item) - return ret - - -def string_ordinal_total(s: str) -> int: - tot = 0 - for c in s: - tot += ord(c) - return tot diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 1ef881f702..3f3e4a9234 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
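The hunk below hardens get_context_for_event against malformed unsigned contents received over federation: invite_room_state and knock_room_state are only used when they are actually lists. The defensive idiom, factored into a hypothetical helper for illustration:

from typing import Any, Dict, List


def stripped_state_list(unsigned: Dict[str, Any], key: str) -> List[Any]:
    value = unsigned.get(key, [])
    # `unsigned` comes from remote servers, so a non-list value here is
    # ignored rather than allowed to crash the pusher.
    return value if isinstance(value, list) else []


# e.g. room_state = stripped_state_list(ev.unsigned, "invite_room_state")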
@@ -74,9 +74,13 @@ async def get_context_for_event(
     room_state = []

     if ev.content.get("membership") == Membership.INVITE:
-        room_state = ev.unsigned.get("invite_room_state", [])
+        invite_room_state = ev.unsigned.get("invite_room_state", [])
+        if isinstance(invite_room_state, list):
+            room_state = invite_room_state
     elif ev.content.get("membership") == Membership.KNOCK:
-        room_state = ev.unsigned.get("knock_room_state", [])
+        knock_room_state = ev.unsigned.get("knock_room_state", [])
+        if isinstance(knock_room_state, list):
+            room_state = knock_room_state

     # Ideally we'd reuse the logic in `calculate_room_name`, but that gets
     # complicated to handle partial events vs pulling events from the DB.
diff --git a/synapse/push/push_types.py b/synapse/push/push_types.py
index 201ec97219..57fa926a46 100644
--- a/synapse/push/push_types.py
+++ b/synapse/push/push_types.py
@@ -18,9 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import List, Optional
-
-from typing_extensions import TypedDict
+from typing import List, Optional, TypedDict


 class EmailReason(TypedDict, total=False):
diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py
index 9a5dd7a9d4..39bfe0dd33 100644
--- a/synapse/push/pusher.py
+++ b/synapse/push/pusher.py
@@ -23,9 +23,7 @@ import logging
 from typing import TYPE_CHECKING, Callable, Dict, Optional

 from synapse.push import Pusher, PusherConfig
-from synapse.push.emailpusher import EmailPusher
 from synapse.push.httppusher import HttpPusher
-from synapse.push.mailer import Mailer

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -42,17 +40,6 @@ class PusherFactory:
             "http": HttpPusher
         }

-        logger.info("email enable notifs: %r", hs.config.email.email_enable_notifs)
-        if hs.config.email.email_enable_notifs:
-            self.mailers: Dict[str, Mailer] = {}
-
-            self._notif_template_html = hs.config.email.email_notif_template_html
-            self._notif_template_text = hs.config.email.email_notif_template_text
-
-            self.pusher_types["email"] = self._create_email_pusher
-
-            logger.info("defined email pusher type")
-
     def create_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]:
         kind = pusher_config.kind
         f = self.pusher_types.get(kind, None)
@@ -60,28 +47,3 @@ class PusherFactory:
             return None
         logger.debug("creating %s pusher for %r", kind, pusher_config)
         return f(self.hs, pusher_config)
-
-    def _create_email_pusher(
-        self, _hs: "HomeServer", pusher_config: PusherConfig
-    ) -> EmailPusher:
-        app_name = self._app_name_from_pusherdict(pusher_config)
-        mailer = self.mailers.get(app_name)
-        if not mailer:
-            mailer = Mailer(
-                hs=self.hs,
-                app_name=app_name,
-                template_html=self._notif_template_html,
-                template_text=self._notif_template_text,
-            )
-            self.mailers[app_name] = mailer
-        return EmailPusher(self.hs, pusher_config, mailer)
-
-    def _app_name_from_pusherdict(self, pusher_config: PusherConfig) -> str:
-        data = pusher_config.data
-
-        if isinstance(data, dict):
-            brand = data.get("brand")
-            if isinstance(brand, str):
-                return brand
-
-        return self.config.email.email_app_name
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 0a7541b4c7..bf80ac97a1 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py
@@ -34,7 +34,6 @@ from synapse.push.pusher import PusherFactory from synapse.replication.http.push import ReplicationRemovePusherRestServlet from synapse.types import JsonDict, RoomStreamToken, StrCollection from synapse.util.async_helpers import concurrently_execute -from synapse.util.threepids import canonicalise_email if TYPE_CHECKING: from synapse.server import HomeServer @@ -122,11 +121,7 @@ class PusherPool: """ if kind == "email": - email_owner = await self.store.get_user_id_by_threepid( - "email", canonicalise_email(pushkey) - ) - if email_owner != user_id: - raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) + raise SynapseError(400, "Threepids are not supported on this server", "M_UNSUPPORTED") time_now_msec = self.clock.time_msec() diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py
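With the pusherpool.py change above, registering an "email" pusher is rejected outright rather than validated against a threepid. A sketch of what a client would now see via POST /_matrix/client/v3/pushers/set; the base URL and token are hypothetical placeholders:

import json
import urllib.error
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
USER_TOKEN = "hypothetical_user_access_token"

pusher = {
    "kind": "email",
    "app_id": "m.email",
    "pushkey": "alice@example.com",
    "app_display_name": "Email Notifications",
    "device_display_name": "alice@example.com",
    "lang": "en",
    "data": {},
}
req = urllib.request.Request(
    BASE_URL + "/_matrix/client/v3/pushers/set",
    data=json.dumps(pusher).encode("utf-8"),
    headers={
        "Authorization": "Bearer " + USER_TOKEN,
        "Content-Type": "application/json",
    },
    method="POST",
)
try:
    urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
    # Expected with this change: 400 with errcode "M_UNSUPPORTED".
    print(e.code, json.load(e))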
index c9cf838255..d500051714 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py
@@ -1,7 +1,7 @@ # # This file is licensed under the Affero General Public License (AGPL) version 3. # -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -23,6 +23,7 @@ from typing import TYPE_CHECKING from synapse.http.server import JsonResource from synapse.replication.http import ( account_data, + delayed_events, devices, federation, login, @@ -64,3 +65,4 @@ class ReplicationRestResource(JsonResource): login.register_servlets(hs, self) register.register_servlets(hs, self) devices.register_servlets(hs, self) + delayed_events.register_servlets(hs, self) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py
index 9aa8d90bfe..0002538680 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py
@@ -128,9 +128,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): # We reserve `instance_name` as a parameter to sending requests, so we # assert here that sub classes don't try and use the name. - assert ( - "instance_name" not in self.PATH_ARGS - ), "`instance_name` is a reserved parameter name" + assert "instance_name" not in self.PATH_ARGS, ( + "`instance_name` is a reserved parameter name" + ) assert ( "instance_name" not in signature(self.__class__._serialize_payload).parameters diff --git a/synapse/replication/http/delayed_events.py b/synapse/replication/http/delayed_events.py new file mode 100644
index 0000000000..229022070c --- /dev/null +++ b/synapse/replication/http/delayed_events.py
@@ -0,0 +1,62 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import logging +from typing import TYPE_CHECKING, Dict, Optional, Tuple + +from twisted.web.server import Request + +from synapse.http.server import HttpServer +from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict, JsonMapping + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class ReplicationAddedDelayedEventRestServlet(ReplicationEndpoint): + """Handle a delayed event being added by another worker. + + Request format: + + POST /_synapse/replication/added_delayed_event/ + + {} + """ + + NAME = "added_delayed_event" + PATH_ARGS = () + CACHE = False + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self.handler = hs.get_delayed_events_handler() + + @staticmethod + async def _serialize_payload(next_send_ts: int) -> JsonDict: # type: ignore[override] + return {"next_send_ts": next_send_ts} + + async def _handle_request( # type: ignore[override] + self, request: Request, content: JsonDict + ) -> Tuple[int, Dict[str, Optional[JsonMapping]]]: + self.handler.on_added(int(content["next_send_ts"])) + + return 200, {} + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + ReplicationAddedDelayedEventRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py
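For context on how a servlet like this gets used: replication endpoints are normally invoked from other workers through ReplicationEndpoint.make_client(). A sketch assuming that usual pattern; the notifier class and worker name below are hypothetical, not part of this diff:

from synapse.replication.http.delayed_events import (
    ReplicationAddedDelayedEventRestServlet,
)

class DelayedEventsNotifierSketch:
    def __init__(self, hs) -> None:
        # make_client() builds an async callable that POSTs to
        # /_synapse/replication/added_delayed_event/ on the chosen instance.
        self._added_client = ReplicationAddedDelayedEventRestServlet.make_client(hs)

    async def notify_added(self, next_send_ts: int) -> None:
        # `instance_name` is the reserved routing parameter of replication
        # clients; `next_send_ts` matches _serialize_payload() above.
        await self._added_client(
            instance_name="background_worker1",  # hypothetical worker name
            next_send_ts=next_send_ts,
        )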
index 9c537427df..940f418396 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py
@@ -119,7 +119,9 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): return payload - async def _handle_request(self, request: Request, content: JsonDict) -> Tuple[int, JsonDict]: # type: ignore[override] + async def _handle_request( # type: ignore[override] + self, request: Request, content: JsonDict + ) -> Tuple[int, JsonDict]: with Measure(self.clock, "repl_fed_send_events_parse"): room_id = content["room_id"] backfilled = content["backfilled"] diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py
index de07e75b46..48e254cdb1 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py
@@ -48,7 +48,7 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint): """ - NAME = "add_user_account_data" + NAME = "remove_pusher" PATH_ARGS = ("user_id",) CACHE = False @@ -98,7 +98,9 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint): self._store = hs.get_datastores().main @staticmethod - async def _serialize_payload(user_id: str, old_room_id: str, new_room_id: str) -> JsonDict: # type: ignore[override] + async def _serialize_payload( # type: ignore[override] + user_id: str, old_room_id: str, new_room_id: str + ) -> JsonDict: return {} async def _handle_request( # type: ignore[override] @@ -109,7 +111,6 @@ class ReplicationCopyPusherRestServlet(ReplicationEndpoint): old_room_id: str, new_room_id: str, ) -> Tuple[int, JsonDict]: - await self._store.copy_push_rules_from_room_to_room_for_user( old_room_id, new_room_id, user_id ) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 3dddbb70b4..0bd5478cd3 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py
@@ -18,8 +18,8 @@ # [This file includes modifications made by New Vector Limited] # # -"""A replication client for use by synapse workers. -""" +"""A replication client for use by synapse workers.""" + import logging from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index b7a7e77597..6ab5356660 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py
@@ -23,6 +23,7 @@ The VALID_SERVER_COMMANDS and VALID_CLIENT_COMMANDS define which commands are allowed to be sent by which side. """ + import abc import logging from typing import List, Optional, Tuple, Type, TypeVar @@ -494,7 +495,7 @@ class LockReleasedCommand(Command): class NewActiveTaskCommand(_SimpleCommand): - """Sent to inform instance handling background tasks that a new active task is available to run. + """Sent to inform the instance handling background tasks that a new task is ready to run. Format:: diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index 72a42cb6cc..1fafbb48c3 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py
@@ -727,7 +727,7 @@ class ReplicationCommandHandler: ) -> None: """Called when get a new NEW_ACTIVE_TASK command.""" if self._task_scheduler: - self._task_scheduler.launch_task_by_id(cmd.data) + self._task_scheduler.on_new_task(cmd.data) def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" @@ -857,7 +857,7 @@ UpdateRow = TypeVar("UpdateRow") def _batch_updates( - updates: Iterable[Tuple[UpdateToken, UpdateRow]] + updates: Iterable[Tuple[UpdateToken, UpdateRow]], ) -> Iterator[Tuple[UpdateToken, List[UpdateRow]]]: """Collect stream updates with the same token together diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 4471cc8f0c..fb9c539122 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py
@@ -23,6 +23,7 @@ protocols. An explanation of this protocol is available in docs/tcp_replication.md """ + import fcntl import logging import struct diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py
index c0329378ac..d647a2b332 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py
@@ -18,8 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -"""The server side of the replication stream. -""" +"""The server side of the replication stream.""" import logging import random @@ -307,7 +306,7 @@ class ReplicationStreamer: def _batch_updates( - updates: List[Tuple[Token, StreamRow]] + updates: List[Tuple[Token, StreamRow]], ) -> List[Tuple[Optional[Token], StreamRow]]: """Takes a list of updates of form [(token, row)] and sets the token to None for all rows where the next row has the same token. This is used to diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index d021904de7..ebf5964d29 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py
@@ -247,7 +247,7 @@ class _StreamFromIdGen(Stream): def current_token_without_instance( - current_token: Callable[[], int] + current_token: Callable[[], int], ) -> Callable[[str], int]: """Takes a current token callback function for a single writer stream that doesn't take an instance name parameter and wraps it in a function that diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py
index ea0803dfc2..05b55fb033 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py
@@ -200,9 +200,9 @@ class EventsStream(_StreamFromIdGen): # we rely on get_all_new_forward_event_rows strictly honouring the limit, so # that we know it is safe to just take upper_limit = event_rows[-1][0]. - assert ( - len(event_rows) <= target_row_count - ), "get_all_new_forward_event_rows did not honour row limit" + assert len(event_rows) <= target_row_count, ( + "get_all_new_forward_event_rows did not honour row limit" + ) # if we hit the limit on event_updates, there's no point in going beyond the # last stream_id in the batch for the other sources. diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index c5cdc36955..00f108de08 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -29,8 +29,9 @@ from synapse.rest.client import ( account_validity, appservice_ping, auth, - auth_issuer, + auth_metadata, capabilities, + delayed_events, devices, directory, events, @@ -81,6 +82,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = ( room.register_deprecated_servlets, events.register_servlets, room.register_servlets, + delayed_events.register_servlets, login.register_servlets, profile.register_servlets, presence.register_servlets, @@ -119,7 +121,7 @@ CLIENT_SERVLET_FUNCTIONS: Tuple[RegisterServletsFunc, ...] = ( mutual_rooms.register_servlets, login_token_request.register_servlets, rendezvous.register_servlets, - auth_issuer.register_servlets, + auth_metadata.register_servlets, ) SERVLET_GROUPS: Dict[str, Iterable[RegisterServletsFunc]] = { @@ -185,7 +187,6 @@ class ClientRestResource(JsonResource): mutual_rooms.register_servlets, login_token_request.register_servlets, rendezvous.register_servlets, - auth_issuer.register_servlets, ]: continue diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index cdaee17451..b37bf3429b 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py
@@ -39,7 +39,7 @@ from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME -from synapse.http.server import HttpServer, JsonResource +from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin @@ -51,6 +51,7 @@ from synapse.rest.admin.background_updates import ( from synapse.rest.admin.devices import ( DeleteDevicesRestServlet, DeviceRestServlet, + DevicesGetRestServlet, DevicesRestServlet, ) from synapse.rest.admin.event_reports import ( @@ -86,6 +87,7 @@ from synapse.rest.admin.rooms import ( RoomStateRestServlet, RoomTimestampToEventRestServlet, ) +from synapse.rest.admin.scheduled_tasks import ScheduledTasksRestServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.rest.admin.statistics import ( LargestRoomsStatistics, @@ -98,13 +100,16 @@ from synapse.rest.admin.users import ( DeactivateAccountRestServlet, PushersRestServlet, RateLimitRestServlet, + RedactUser, + RedactUserStatus, ResetPasswordRestServlet, SearchUsersRestServlet, ShadowBanRestServlet, SuspendAccountRestServlet, UserAdminServlet, UserByExternalId, - UserByThreePid, + UserInvitesCount, + UserJoinedRoomCount, UserMembershipRestServlet, UserRegisterServlet, UserReplaceMasterCrossSigningKeyRestServlet, @@ -201,8 +206,7 @@ class PurgeHistoryRestServlet(RestServlet): (stream, topo, _event_id) = r token = "t%d-%d" % (topo, stream) logger.info( - "[purge] purging up to token %s (received_ts %i => " - "stream_ordering %i)", + "[purge] purging up to token %s (received_ts %i => stream_ordering %i)", token, ts, stream_ordering, @@ -259,27 +263,24 @@ class PurgeHistoryStatusRestServlet(RestServlet): ######################################################################################## -class AdminRestResource(JsonResource): - """The REST resource which gets mounted at /_synapse/admin""" - - def __init__(self, hs: "HomeServer"): - JsonResource.__init__(self, hs, canonical_json=False) - register_servlets(hs, self) - - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: """ Register all the admin servlets. """ - # Admin servlets aren't registered on workers. + RoomRestServlet(hs).register(http_server) + + # Admin servlets below may not work on workers. if hs.config.worker.worker_app is not None: + # Some admin servlets can be mounted on workers when MSC3861 is enabled. 
+ if hs.config.experimental.msc3861.enabled: + register_servlets_for_msc3861_delegation(hs, http_server) + return register_servlets_for_client_rest_resource(hs, http_server) BlockRoomRestServlet(hs).register(http_server) ListRoomRestServlet(hs).register(http_server) RoomStateRestServlet(hs).register(http_server) - RoomRestServlet(hs).register(http_server) RoomRestV2Servlet(hs).register(http_server) RoomMembersRestServlet(hs).register(http_server) DeleteRoomStatusByDeleteIdRestServlet(hs).register(http_server) @@ -318,7 +319,10 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: RoomTimestampToEventRestServlet(hs).register(http_server) UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server) UserByExternalId(hs).register(http_server) - UserByThreePid(hs).register(http_server) + RedactUser(hs).register(http_server) + RedactUserStatus(hs).register(http_server) + UserInvitesCount(hs).register(http_server) + UserJoinedRoomCount(hs).register(http_server) DeviceRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) @@ -328,8 +332,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: BackgroundUpdateRestServlet(hs).register(http_server) BackgroundUpdateStartJobRestServlet(hs).register(http_server) ExperimentalFeaturesRestServlet(hs).register(http_server) - if hs.config.experimental.msc3823_account_suspension: - SuspendAccountRestServlet(hs).register(http_server) + SuspendAccountRestServlet(hs).register(http_server) + ScheduledTasksRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource( @@ -357,4 +361,16 @@ def register_servlets_for_client_rest_resource( ListMediaInRoom(hs).register(http_server) # don't add more things here: new servlets should only be exposed on - # /_synapse/admin so should not go here. Instead register them in AdminRestResource. + # /_synapse/admin so should not go here. Instead register them in register_servlets. + + +def register_servlets_for_msc3861_delegation( + hs: "HomeServer", http_server: HttpServer +) -> None: + """Register servlets needed by MAS when MSC3861 is enabled""" + assert hs.config.experimental.msc3861.enabled + + UserRestServletV2(hs).register(http_server) + UsernameAvailableRestServlet(hs).register(http_server) + UserReplaceMasterCrossSigningKeyRestServlet(hs).register(http_server) + DevicesGetRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index 449b066923..09baf8ce21 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py
@@ -113,18 +113,19 @@ class DeviceRestServlet(RestServlet): return HTTPStatus.OK, {} -class DevicesRestServlet(RestServlet): +class DevicesGetRestServlet(RestServlet): """ Retrieve the given user's devices + + This can be mounted on workers as it is read-only, as opposed + to `DevicesRestServlet`. """ PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2") def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - handler = hs.get_device_handler() - assert isinstance(handler, DeviceHandler) - self.device_handler = handler + self.device_worker_handler = hs.get_device_handler() self.store = hs.get_datastores().main self.is_mine = hs.is_mine @@ -141,9 +142,35 @@ class DevicesRestServlet(RestServlet): if u is None: raise NotFoundError("Unknown user") - devices = await self.device_handler.get_devices_by_user(target_user.to_string()) + devices = await self.device_worker_handler.get_devices_by_user( + target_user.to_string() + ) + + # mark the dehydrated device by adding a "dehydrated" flag + dehydrated_device_info = await self.device_worker_handler.get_dehydrated_device( + target_user.to_string() + ) + if dehydrated_device_info: + dehydrated_device_id = dehydrated_device_info[0] + for device in devices: + is_dehydrated = device["device_id"] == dehydrated_device_id + device["dehydrated"] = is_dehydrated + return HTTPStatus.OK, {"devices": devices, "total": len(devices)} + +class DevicesRestServlet(DevicesGetRestServlet): + """ + Retrieve the given user's devices + """ + + PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2") + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + assert isinstance(self.device_worker_handler, DeviceHandler) + self.device_handler = self.device_worker_handler + async def on_POST( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py
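The devices.py split above leaves a read-only DevicesGetRestServlet that workers can serve, and it now marks each device with a "dehydrated" flag. A sketch of consuming that from the admin API; the base URL, token, and user ID are hypothetical placeholders:

import json
import urllib.parse
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "hypothetical_admin_access_token"
USER_ID = "@alice:example.com"

url = "%s/_synapse/admin/v2/users/%s/devices" % (
    BASE_URL,
    urllib.parse.quote(USER_ID),
)
req = urllib.request.Request(url, headers={"Authorization": "Bearer " + ADMIN_TOKEN})
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)

# Each device dict now carries a "dehydrated" boolean alongside the usual fields.
dehydrated = [d["device_id"] for d in body["devices"] if d.get("dehydrated")]
print("total devices:", body["total"], "dehydrated:", dehydrated)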
index 9fb68bfa46..ff1abc0697 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py
@@ -50,8 +50,10 @@ class EventReportsRestServlet(RestServlet): The parameters `from` and `limit` are required only for pagination. By default, a `limit` of 100 is used. The parameter `dir` can be used to define the order of results. - The parameter `user_id` can be used to filter by user id. - The parameter `room_id` can be used to filter by room id. + The `user_id` query parameter filters by the user ID of the reporter of the event. + The `room_id` query parameter filters by room ID. + The `event_sender_user_id` query parameter can be used to filter by the user ID + of the sender of the reported event. Returns: A list of reported events and an integer representing the total number of reported events that exist given this query @@ -71,6 +73,7 @@ class EventReportsRestServlet(RestServlet): direction = parse_enum(request, "dir", Direction, Direction.BACKWARDS) user_id = parse_string(request, "user_id") room_id = parse_string(request, "room_id") + event_sender_user_id = parse_string(request, "event_sender_user_id") if start < 0: raise SynapseError( @@ -87,7 +90,7 @@ class EventReportsRestServlet(RestServlet): ) event_reports, total = await self._store.get_event_reports_paginate( - start, limit, direction, user_id, room_id + start, limit, direction, user_id, room_id, event_sender_user_id ) ret = {"event_reports": event_reports, "total": total} if (start + limit) < total: diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py
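A sketch of the new event_sender_user_id filter on the event reports admin API shown above; the base URL, token, and user IDs are hypothetical placeholders:

import json
import urllib.parse
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "hypothetical_admin_access_token"

params = urllib.parse.urlencode(
    {
        "limit": 10,
        # Only reports about events *sent by* this user, as opposed to
        # `user_id`, which filters by the reporter.
        "event_sender_user_id": "@spammer:example.com",
    }
)
req = urllib.request.Request(
    "%s/_synapse/admin/v1/event_reports?%s" % (BASE_URL, params),
    headers={"Authorization": "Bearer " + ADMIN_TOKEN},
)
with urllib.request.urlopen(req) as resp:
    reports = json.load(resp)
print(reports["total"], "matching reports")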
index d7913896d9..afb71f4a0f 100644 --- a/synapse/rest/admin/experimental_features.py +++ b/synapse/rest/admin/experimental_features.py
@@ -43,12 +43,15 @@ class ExperimentalFeature(str, Enum): MSC3881 = "msc3881" MSC3575 = "msc3575" + MSC4222 = "msc4222" def is_globally_enabled(self, config: "HomeServerConfig") -> bool: if self is ExperimentalFeature.MSC3881: return config.experimental.msc3881_enabled if self is ExperimentalFeature.MSC3575: return config.experimental.msc3575_enabled + if self is ExperimentalFeature.MSC4222: + return config.experimental.msc4222_enabled assert_never(self) diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py
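The msc4222 flag above joins the per-user experimental features admin API. A sketch of enabling it for one user, assuming the PUT body format described in docs/admin_api/experimental_features.md; the base URL, token, and user ID are hypothetical placeholders:

import json
import urllib.parse
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "hypothetical_admin_access_token"
USER_ID = "@alice:example.com"

req = urllib.request.Request(
    "%s/_synapse/admin/v1/experimental_features/%s"
    % (BASE_URL, urllib.parse.quote(USER_ID)),
    data=json.dumps({"features": {"msc4222": True}}).encode("utf-8"),
    headers={
        "Authorization": "Bearer " + ADMIN_TOKEN,
        "Content-Type": "application/json",
    },
    method="PUT",
)
urllib.request.urlopen(req)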
index 0867f7a51c..bec2331590 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py
@@ -181,8 +181,7 @@ class NewRegistrationTokenRestServlet(RestServlet): uses_allowed = body.get("uses_allowed", None) if not ( - uses_allowed is None - or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721 + uses_allowed is None or (type(uses_allowed) is int and uses_allowed >= 0) # noqa: E721 ): raise SynapseError( HTTPStatus.BAD_REQUEST, diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index 01f9de9ffa..adac1f0362 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py
@@ -23,6 +23,7 @@ from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple, cast import attr +from immutabledict import immutabledict from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError @@ -149,6 +150,7 @@ class RoomRestV2Servlet(RestServlet): def _convert_delete_task_to_response(task: ScheduledTask) -> JsonDict: return { "delete_id": task.id, + "room_id": task.resource_id, "status": task.status, "shutdown_room": task.result, } @@ -249,6 +251,10 @@ class ListRoomRestServlet(RestServlet): direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) reverse_order = True if direction == Direction.BACKWARDS else False + emma_include_tombstone = parse_boolean( + request, "emma_include_tombstone", default=False + ) + # Return list of rooms according to parameters rooms, total_rooms = await self.store.get_rooms_paginate( start, @@ -258,6 +264,7 @@ class ListRoomRestServlet(RestServlet): search_term, public_rooms, empty_rooms, + emma_include_tombstone = emma_include_tombstone ) response = { @@ -463,7 +470,18 @@ class RoomStateRestServlet(RestServlet): if not room: raise NotFoundError("Room not found") - event_ids = await self._storage_controllers.state.get_current_state_ids(room_id) + state_filter = None + type = parse_string(request, "type") + + if type: + state_filter = StateFilter( + types=immutabledict({type: None}), + include_others=False, + ) + + event_ids = await self._storage_controllers.state.get_current_state_ids( + room_id, state_filter + ) events = await self.store.get_events(event_ids.values()) now = self.clock.time_msec() room_state = await self._event_serializer.serialize_events(events.values(), now) diff --git a/synapse/rest/admin/scheduled_tasks.py b/synapse/rest/admin/scheduled_tasks.py new file mode 100644
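The rooms.py hunk above adds a `type` query parameter to the room state admin endpoint, building a StateFilter so only events of that type are returned instead of the full room state. A sketch of using it; the base URL, token, and room ID are hypothetical placeholders:

import json
import urllib.parse
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "hypothetical_admin_access_token"
ROOM_ID = "!abc123:example.com"

url = "%s/_synapse/admin/v1/rooms/%s/state?%s" % (
    BASE_URL,
    urllib.parse.quote(ROOM_ID),
    urllib.parse.urlencode({"type": "m.room.member"}),
)
req = urllib.request.Request(url, headers={"Authorization": "Bearer " + ADMIN_TOKEN})
with urllib.request.urlopen(req) as resp:
    # The response's "state" list should now contain only m.room.member events.
    state = json.load(resp)["state"]
print(len(state), "m.room.member events")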
index 0000000000..2ae13021b9 --- /dev/null +++ b/synapse/rest/admin/scheduled_tasks.py
@@ -0,0 +1,70 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# +# +from typing import TYPE_CHECKING, Tuple + +from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.http.site import SynapseRequest +from synapse.rest.admin import admin_patterns, assert_requester_is_admin +from synapse.types import JsonDict, TaskStatus + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class ScheduledTasksRestServlet(RestServlet): + """Get a list of scheduled tasks and their statuses + optionally filtered by action name, resource id, status, and max timestamp + """ + + PATTERNS = admin_patterns("/scheduled_tasks$") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + # extract query params + action_name = parse_string(request, "action_name") + resource_id = parse_string(request, "resource_id") + status = parse_string(request, "job_status") + max_timestamp = parse_integer(request, "max_timestamp") + + actions = [action_name] if action_name else None + statuses = [TaskStatus(status)] if status else None + + tasks = await self._store.get_scheduled_tasks( + actions=actions, + resource_id=resource_id, + statuses=statuses, + max_timestamp=max_timestamp, + ) + + json_tasks = [] + for task in tasks: + result_task = { + "id": task.id, + "action": task.action, + "status": task.status, + "timestamp_ms": task.timestamp, + "resource_id": task.resource_id, + "result": task.result, + "error": task.error, + } + json_tasks.append(result_task) + + return 200, {"scheduled_tasks": json_tasks} diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
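A sketch of querying the new scheduled tasks admin endpoint defined above, filtering by action and status; the base URL and token are hypothetical placeholders, and "redact_all_events" is an assumed action name used purely for illustration:

import json
import urllib.parse
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "hypothetical_admin_access_token"

params = urllib.parse.urlencode(
    {"action_name": "redact_all_events", "job_status": "active"}
)
req = urllib.request.Request(
    "%s/_synapse/admin/v1/scheduled_tasks?%s" % (BASE_URL, params),
    headers={"Authorization": "Bearer " + ADMIN_TOKEN},
)
with urllib.request.urlopen(req) as resp:
    tasks = json.load(resp)["scheduled_tasks"]
for task in tasks:
    # Field names follow the result_task dict built in the servlet above.
    print(task["id"], task["action"], task["status"], task["timestamp_ms"])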
index ad515bd5a3..7671e020e0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py
@@ -27,8 +27,8 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 -from synapse.api.constants import Direction, UserTypes +from synapse._pydantic_compat import StrictBool, StrictInt, StrictStr +from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( RestServlet, @@ -50,17 +50,12 @@ from synapse.rest.admin._base import ( from synapse.rest.client._base import client_patterns from synapse.storage.databases.main.registration import ExternalIDReuseException from synapse.storage.databases.main.stats import UserSortOrder -from synapse.types import JsonDict, JsonMapping, UserID +from synapse.types import JsonDict, JsonMapping, TaskStatus, UserID from synapse.types.rest import RequestBodyModel if TYPE_CHECKING: from synapse.server import HomeServer -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictBool -else: - from pydantic import StrictBool - logger = logging.getLogger(__name__) @@ -235,6 +230,7 @@ class UserRestServletV2(RestServlet): self.registration_handler = hs.get_registration_handler() self.pusher_pool = hs.get_pusherpool() self._msc3866_enabled = hs.config.experimental.msc3866.enabled + self._all_user_types = hs.config.user_types.all_user_types async def on_GET( self, request: SynapseRequest, user_id: str @@ -269,12 +265,6 @@ class UserRestServletV2(RestServlet): user = await self.admin_handler.get_user(target_user) user_id = target_user.to_string() - # check for required parameters for each threepid - threepids = body.get("threepids") - if threepids is not None: - for threepid in threepids: - assert_params_in_dict(threepid, ["medium", "address"]) - # check for required parameters for each external_id external_ids = body.get("external_ids") if external_ids is not None: @@ -282,7 +272,7 @@ class UserRestServletV2(RestServlet): assert_params_in_dict(external_id, ["auth_provider", "external_id"]) user_type = body.get("user_type", None) - if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: + if user_type is not None and user_type not in self._all_user_types: raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid user type") set_admin_to = body.get("admin", False) @@ -338,51 +328,12 @@ class UserRestServletV2(RestServlet): for external_id in external_ids ] - # convert List[Dict[str, str]] into Set[Tuple[str, str]] - if threepids is not None: - new_threepids = { - (threepid["medium"], threepid["address"]) for threepid in threepids - } - if user: # modify user if "displayname" in body: await self.profile_handler.set_displayname( target_user, requester, body["displayname"], True ) - if threepids is not None: - # get changed threepids (added and removed) - cur_threepids = { - (threepid.medium, threepid.address) - for threepid in await self.store.user_get_threepids(user_id) - } - add_threepids = new_threepids - cur_threepids - del_threepids = cur_threepids - new_threepids - - # remove old threepids - for medium, address in del_threepids: - try: - # Attempt to remove any known bindings of this third-party ID - # and user ID from identity servers. - await self.hs.get_identity_handler().try_unbind_threepid( - user_id, medium, address, id_server=None - ) - except Exception: - logger.exception("Failed to remove threepids") - raise SynapseError(500, "Failed to remove threepids") - - # Delete the local association of this user ID and third-party ID. 
- await self.auth_handler.delete_local_threepid( - user_id, medium, address - ) - - # add new threepids - current_time = self.hs.get_clock().time_msec() - for medium, address in add_threepids: - await self.auth_handler.add_threepid( - user_id, medium, address, current_time - ) - if external_ids is not None: try: await self.store.replace_user_external_id( @@ -467,28 +418,6 @@ class UserRestServletV2(RestServlet): approved=new_user_approved, ) - if threepids is not None: - current_time = self.hs.get_clock().time_msec() - for medium, address in new_threepids: - await self.auth_handler.add_threepid( - user_id, medium, address, current_time - ) - if ( - self.hs.config.email.email_enable_notifs - and self.hs.config.email.email_notif_for_new_users - and medium == "email" - ): - await self.pusher_pool.add_or_update_pusher( - user_id=user_id, - kind="email", - app_id="m.email", - app_display_name="Email Notifications", - device_display_name=address, - pushkey=address, - lang=None, - data={}, - ) - if external_ids is not None: try: for auth_provider, external_id in new_external_ids: @@ -529,6 +458,7 @@ class UserRegisterServlet(RestServlet): self.reactor = hs.get_reactor() self.nonces: Dict[str, int] = {} self.hs = hs + self._all_user_types = hs.config.user_types.all_user_types def _clear_old_nonces(self) -> None: """ @@ -610,7 +540,7 @@ class UserRegisterServlet(RestServlet): user_type = body.get("user_type", None) displayname = body.get("displayname", None) - if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: + if user_type is not None and user_type not in self._all_user_types: raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid user type") if "mac" not in body: @@ -988,7 +918,7 @@ class UserAdminServlet(RestServlet): class UserMembershipRestServlet(RestServlet): """ - Get room list of an user. + Get the list of joined room IDs for a user. """ PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/joined_rooms$") @@ -1004,8 +934,9 @@ class UserMembershipRestServlet(RestServlet): await assert_requester_is_admin(self.auth, request) room_ids = await self.store.get_rooms_for_user(user_id) - ret = {"joined_rooms": list(room_ids), "total": len(room_ids)} - return HTTPStatus.OK, ret + rooms_response = {"joined_rooms": list(room_ids), "total": len(room_ids)} + + return HTTPStatus.OK, rooms_response class PushersRestServlet(RestServlet): @@ -1387,26 +1318,144 @@ class UserByExternalId(RestServlet): return HTTPStatus.OK, {"user_id": user_id} -class UserByThreePid(RestServlet): - """Find a user based on 3PID of a particular medium""" +class RedactUser(RestServlet): + """ + Redact all the events of a given user in the given rooms or, if an empty list is provided, + all events in all rooms the user is a member of. 
Kicks off a background process and + returns an id that can be used to check on the progress of the redaction process + """ - PATTERNS = admin_patterns("/threepid/(?P<medium>[^/]*)/users/(?P<address>[^/]*)") + PATTERNS = admin_patterns("/user/(?P<user_id>[^/]*)/redact") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._store = hs.get_datastores().main + self.admin_handler = hs.get_admin_handler() + + class PostBody(RequestBodyModel): + rooms: List[StrictStr] + reason: Optional[StrictStr] + limit: Optional[StrictInt] + + async def on_POST( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + requester = await self._auth.get_user_by_req(request) + await assert_user_is_admin(self._auth, requester) + + # parse provided user id to check that it is valid + UserID.from_string(user_id) + + body = parse_and_validate_json_object_from_request(request, self.PostBody) + + limit = body.limit + if limit and limit <= 0: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "If limit is provided it must be a positive integer.", + ) + + rooms = body.rooms + if not rooms: + current_rooms = list(await self._store.get_rooms_for_user(user_id)) + banned_rooms = list( + await self._store.get_rooms_user_currently_banned_from(user_id) + ) + rooms = current_rooms + banned_rooms + + redact_id = await self.admin_handler.start_redact_events( + user_id, rooms, requester.serialize(), body.reason, limit + ) + + return HTTPStatus.OK, {"redact_id": redact_id} + + +class RedactUserStatus(RestServlet): + """ + Check on the progress of the redaction request represented by the provided ID, returning + the status of the process and a dict of events that were unable to be redacted, if any + """ + + PATTERNS = admin_patterns("/user/redact_status/(?P<redact_id>[^/]*)$") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self.admin_handler = hs.get_admin_handler() async def on_GET( - self, - request: SynapseRequest, - medium: str, - address: str, + self, request: SynapseRequest, redact_id: str ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) - user_id = await self._store.get_user_id_by_threepid(medium, address) + task = await self.admin_handler.get_redact_task(redact_id) + + if task: + if task.status == TaskStatus.ACTIVE: + return HTTPStatus.OK, {"status": TaskStatus.ACTIVE} + elif task.status == TaskStatus.COMPLETE: + assert task.result is not None + failed_redactions = task.result.get("failed_redactions") + return HTTPStatus.OK, { + "status": TaskStatus.COMPLETE, + "failed_redactions": failed_redactions if failed_redactions else {}, + } + elif task.status == TaskStatus.SCHEDULED: + return HTTPStatus.OK, {"status": TaskStatus.SCHEDULED} + else: + return HTTPStatus.OK, { + "status": TaskStatus.FAILED, + "error": ( + task.error + if task.error + else "Unknown error, please check the logs for more information." 
+ ), + } + else: + raise NotFoundError("redact id '%s' not found" % redact_id) - if user_id is None: - raise NotFoundError("User not found") - return HTTPStatus.OK, {"user_id": user_id} +class UserInvitesCount(RestServlet): + """ + Return the count of invites that the user has sent after the given timestamp + """ + + PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/sent_invite_count") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self.store = hs.get_datastores().main + + async def on_GET( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + from_ts = parse_integer(request, "from_ts", required=True) + + sent_invite_count = await self.store.get_sent_invite_count_by_user( + user_id, from_ts + ) + + return HTTPStatus.OK, {"invite_count": sent_invite_count} + + +class UserJoinedRoomCount(RestServlet): + """ + Return the count of rooms that the user has joined at or after the given timestamp, even + if they have subsequently left/been banned from those rooms. + """ + + PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/cumulative_joined_room_count") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self.store = hs.get_datastores().main + + async def on_GET( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + from_ts = parse_integer(request, "from_ts", required=True) + + joined_rooms = await self.store.get_rooms_for_user_by_date(user_id, from_ts) + + return HTTPStatus.OK, {"cumulative_joined_room_count": len(joined_rooms)} diff --git a/synapse/rest/client/_base.py b/synapse/rest/client/_base.py
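A sketch of the redaction flow added above: kick off a redaction of a user's events, then poll the status endpoint until the background task settles. The base URL, token, and user ID are hypothetical placeholders:

import json
import time
import urllib.parse
import urllib.request

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "hypothetical_admin_access_token"
USER_ID = "@spammer:example.com"

# An empty "rooms" list means: all rooms the user is joined to or banned from.
start = urllib.request.Request(
    "%s/_synapse/admin/v1/user/%s/redact" % (BASE_URL, urllib.parse.quote(USER_ID)),
    data=json.dumps({"rooms": [], "reason": "spam", "limit": 1000}).encode("utf-8"),
    headers={
        "Authorization": "Bearer " + ADMIN_TOKEN,
        "Content-Type": "application/json",
    },
    method="POST",
)
with urllib.request.urlopen(start) as resp:
    redact_id = json.load(resp)["redact_id"]

# Poll until the task leaves the scheduled/active states (TaskStatus values
# serialize as lowercase strings).
while True:
    status_req = urllib.request.Request(
        "%s/_synapse/admin/v1/user/redact_status/%s" % (BASE_URL, redact_id),
        headers={"Authorization": "Bearer " + ADMIN_TOKEN},
    )
    with urllib.request.urlopen(status_req) as resp:
        status = json.load(resp)
    if status["status"] not in ("scheduled", "active"):
        break
    time.sleep(5)
print(status)

The UserInvitesCount and UserJoinedRoomCount servlets added alongside follow a simpler shape: a GET with a required from_ts query parameter, returning a single count.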
index 93dec6375a..6cf37869d8 100644 --- a/synapse/rest/client/_base.py +++ b/synapse/rest/client/_base.py
@@ -19,8 +19,8 @@ # # -"""This module contains base REST classes for constructing client v1 servlets. -""" +"""This module contains base REST classes for constructing client v1 servlets.""" + import logging import re from typing import Any, Awaitable, Callable, Iterable, Pattern, Tuple, TypeVar, cast diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 8daa449f9e..455ddda484 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py
@@ -21,28 +21,20 @@ # import logging import random -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, List, Literal, Optional, Tuple from urllib.parse import urlparse -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictBool, StrictStr, constr -else: - from pydantic import StrictBool, StrictStr, constr - import attr -from typing_extensions import Literal from twisted.web.server import Request +from synapse._pydantic_compat import StrictBool, StrictStr, constr from synapse.api.constants import LoginType from synapse.api.errors import ( Codes, InteractiveAuthIncompleteError, NotFoundError, SynapseError, - ThreepidValidationError, ) from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.http.server import HttpServer, finish_request, respond_with_html @@ -54,19 +46,13 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.metrics import threepid_send_requests -from synapse.push.mailer import Mailer from synapse.types import JsonDict from synapse.types.rest import RequestBodyModel from synapse.types.rest.client import ( AuthenticationData, - ClientSecretStr, - EmailRequestTokenBody, - MsisdnRequestTokenBody, + ClientSecretStr ) -from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string -from synapse.util.threepids import check_3pid_allowed, validate_email from ._base import client_patterns, interactive_auth_handler @@ -77,80 +63,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class EmailPasswordRequestTokenRestServlet(RestServlet): - PATTERNS = client_patterns("/account/password/email/requestToken$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.datastore = hs.get_datastores().main - self.config = hs.config - self.identity_handler = hs.get_identity_handler() - - if self.config.email.can_verify_email: - self.mailer = Mailer( - hs=self.hs, - app_name=self.config.email.email_app_name, - template_html=self.config.email.email_password_reset_template_html, - template_text=self.config.email.email_password_reset_template_text, - ) - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.config.email.can_verify_email: - logger.warning( - "User password resets have been disabled due to lack of email config" - ) - raise SynapseError( - 400, "Email-based password resets have been disabled on this server" - ) - - body = parse_and_validate_json_object_from_request( - request, EmailRequestTokenBody - ) - - if body.next_link: - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, body.next_link) - - await self.identity_handler.ratelimit_request_token_requests( - request, "email", body.email - ) - - # The email will be sent to the stored address. - # This avoids a potential account hijack by requesting a password reset to - # an email address which is controlled by the attacker but which, after - # canonicalisation, matches the one in our database. - existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( - "email", body.email - ) - - if existing_user_id is None: - if self.config.server.request_token_inhibit_3pid_errors: - # Make the client think the operation succeeded. See the rationale in the - # comments for request_token_inhibit_3pid_errors. 
- # Also wait for some random amount of time between 100ms and 1s to make it - # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) - return 200, {"sid": random_string(16)} - - raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - - # Send password reset emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - body.email, - body.client_secret, - body.send_attempt, - self.mailer.send_password_reset_mail, - body.next_link, - ) - threepid_send_requests.labels(type="email", reason="password_reset").observe( - body.send_attempt - ) - - # Wrap the session id in a JSON object - return 200, {"sid": sid} - - class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") @@ -211,30 +123,8 @@ class PasswordRestServlet(RestServlet): "modify your account password", ) - if LoginType.EMAIL_IDENTITY in result: - threepid = result[LoginType.EMAIL_IDENTITY] - if "medium" not in threepid or "address" not in threepid: - raise SynapseError(500, "Malformed threepid") - if threepid["medium"] == "email": - # For emails, canonicalise the address. - # We store all email addresses canonicalised in the DB. - # (See add_threepid in synapse/handlers/auth.py) - try: - threepid["address"] = validate_email(threepid["address"]) - except ValueError as e: - raise SynapseError(400, str(e)) - # if using email, we must know about the email they're authing with! - threepid_user_id = await self.datastore.get_user_id_by_threepid( - threepid["medium"], threepid["address"] - ) - if not threepid_user_id: - raise SynapseError( - 404, "Email address not found", Codes.NOT_FOUND - ) - user_id = threepid_user_id - else: - logger.error("Auth succeeded but no known type! %r", result.keys()) - raise SynapseError(500, "", Codes.UNKNOWN) + logger.error("Auth succeeded but no known type (hint: 3PID auth was removed)! 
%r", result.keys()) + raise SynapseError(500, "", Codes.UNKNOWN) except InteractiveAuthIncompleteError as e: # The user needs to provide more steps to complete auth, but @@ -326,486 +216,6 @@ class DeactivateAccountRestServlet(RestServlet): return 200, {"id_server_unbind_result": id_server_unbind_result} -class EmailThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/email/requestToken$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.config = hs.config - self.identity_handler = hs.get_identity_handler() - self.store = self.hs.get_datastores().main - - if self.config.email.can_verify_email: - self.mailer = Mailer( - hs=self.hs, - app_name=self.config.email.email_app_name, - template_html=self.config.email.email_add_threepid_template_html, - template_text=self.config.email.email_add_threepid_template_text, - ) - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.registration.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - - if not self.config.email.can_verify_email: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) - raise SynapseError( - 400, - "Adding an email to your account is disabled on this server", - ) - - body = parse_and_validate_json_object_from_request( - request, EmailRequestTokenBody - ) - - if not await check_3pid_allowed(self.hs, "email", body.email): - raise SynapseError( - 403, - "Your email domain is not authorized on this server", - Codes.THREEPID_DENIED, - ) - - await self.identity_handler.ratelimit_request_token_requests( - request, "email", body.email - ) - - if body.next_link: - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, body.next_link) - - existing_user_id = await self.store.get_user_id_by_threepid("email", body.email) - - if existing_user_id is not None: - if self.config.server.request_token_inhibit_3pid_errors: - # Make the client think the operation succeeded. See the rationale in the - # comments for request_token_inhibit_3pid_errors. - # Also wait for some random amount of time between 100ms and 1s to make it - # look like we did something. 
- await self.hs.get_clock().sleep(random.randint(1, 10) / 10) - return 200, {"sid": random_string(16)} - - raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - - # Send threepid validation emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - body.email, - body.client_secret, - body.send_attempt, - self.mailer.send_add_threepid_mail, - body.next_link, - ) - - threepid_send_requests.labels(type="email", reason="add_threepid").observe( - body.send_attempt - ) - - # Wrap the session id in a JSON object - return 200, {"sid": sid} - - -class MsisdnThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/msisdn/requestToken$") - - def __init__(self, hs: "HomeServer"): - self.hs = hs - super().__init__() - self.store = self.hs.get_datastores().main - self.identity_handler = hs.get_identity_handler() - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - body = parse_and_validate_json_object_from_request( - request, MsisdnRequestTokenBody - ) - msisdn = phone_number_to_msisdn(body.country, body.phone_number) - logger.info("Request #%s to verify ownership of %s", body.send_attempt, msisdn) - - if not await check_3pid_allowed(self.hs, "msisdn", msisdn): - raise SynapseError( - 403, - # TODO: is this error message accurate? Looks like we've only rejected - # this phone number, not necessarily all phone numbers - "Account phone numbers are not authorized on this server", - Codes.THREEPID_DENIED, - ) - - await self.identity_handler.ratelimit_request_token_requests( - request, "msisdn", msisdn - ) - - if body.next_link: - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, body.next_link) - - existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) - - if existing_user_id is not None: - if self.hs.config.server.request_token_inhibit_3pid_errors: - # Make the client think the operation succeeded. See the rationale in the - # comments for request_token_inhibit_3pid_errors. - # Also wait for some random amount of time between 100ms and 1s to make it - # look like we did something. 
- await self.hs.get_clock().sleep(random.randint(1, 10) / 10) - return 200, {"sid": random_string(16)} - - logger.info("MSISDN %s is already in use by %s", msisdn, existing_user_id) - raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) - - if not self.hs.config.registration.account_threepid_delegate_msisdn: - logger.warning( - "No upstream msisdn account_threepid_delegate configured on the server to " - "handle this request" - ) - raise SynapseError( - 400, - "Adding phone numbers to user account is not supported by this homeserver", - ) - - ret = await self.identity_handler.requestMsisdnToken( - self.hs.config.registration.account_threepid_delegate_msisdn, - body.country, - body.phone_number, - body.client_secret, - body.send_attempt, - body.next_link, - ) - - threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe( - body.send_attempt - ) - logger.info("MSISDN %s: got response from identity server: %s", msisdn, ret) - - return 200, ret - - -class AddThreepidEmailSubmitTokenServlet(RestServlet): - """Handles 3PID validation token submission for adding an email to a user's account""" - - PATTERNS = client_patterns( - "/add_threepid/email/submit_token$", releases=(), unstable=True - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.config = hs.config - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - if self.config.email.can_verify_email: - self._failure_email_template = ( - self.config.email.email_add_threepid_template_failure_html - ) - - async def on_GET(self, request: Request) -> None: - if not self.config.email.can_verify_email: - logger.warning( - "Adding emails have been disabled due to lack of an email config" - ) - raise SynapseError( - 400, "Adding an email to your account is disabled on this server" - ) - - sid = parse_string(request, "sid", required=True) - token = parse_string(request, "token", required=True) - client_secret = parse_string(request, "client_secret", required=True) - assert_valid_client_secret(client_secret) - - # Attempt to validate a 3PID session - try: - # Mark the session as valid - next_link = await self.store.validate_threepid_session( - sid, client_secret, token, self.clock.time_msec() - ) - - # Perform a 302 redirect if next_link is set - if next_link: - request.setResponseCode(302) - request.setHeader("Location", next_link) - finish_request(request) - return None - - # Otherwise show the success template - html = self.config.email.email_add_threepid_template_success_html_content - status_code = 200 - except ThreepidValidationError as e: - status_code = e.code - - # Show a failure page with a reason - template_vars = {"failure_reason": e.msg} - html = self._failure_email_template.render(**template_vars) - - respond_with_html(request, status_code, html) - - -class AddThreepidMsisdnSubmitTokenServlet(RestServlet): - """Handles 3PID validation token submission for adding a phone number to a user's - account - """ - - PATTERNS = client_patterns( - "/add_threepid/msisdn/submit_token$", releases=(), unstable=True - ) - - class PostBody(RequestBodyModel): - client_secret: ClientSecretStr - sid: StrictStr - token: StrictStr - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.config = hs.config - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - self.identity_handler = hs.get_identity_handler() - - async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: - if not self.config.registration.account_threepid_delegate_msisdn: - raise 
SynapseError( - 400, - "This homeserver is not validating phone numbers. Use an identity server " - "instead.", - ) - - body = parse_and_validate_json_object_from_request(request, self.PostBody) - - # Proxy submit_token request to msisdn threepid delegate - response = await self.identity_handler.proxy_msisdn_submit_token( - self.config.registration.account_threepid_delegate_msisdn, - body.client_secret, - body.sid, - body.token, - ) - return 200, response - - -class ThreepidRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid$") - # This is used as a proxy for all the 3pid endpoints. - - CATEGORY = "Client API requests" - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.identity_handler = hs.get_identity_handler() - self.auth = hs.get_auth() - self.auth_handler = hs.get_auth_handler() - self.datastore = self.hs.get_datastores().main - - async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - - threepids = await self.datastore.user_get_threepids(requester.user.to_string()) - - return 200, {"threepids": [attr.asdict(t) for t in threepids]} - - # NOTE(dmr): I have chosen not to use Pydantic to parse this request's body, because - # the endpoint is deprecated. (If you really want to, you could do this by reusing - # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle - # `threePidCreds` versus `three_pid_creds`. - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if self.hs.config.experimental.msc3861.enabled: - raise NotFoundError(errcode=Codes.UNRECOGNIZED) - - if not self.hs.config.registration.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - - requester = await self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - body = parse_json_object_from_request(request) - - threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds") - if threepid_creds is None: - raise SynapseError( - 400, "Missing param three_pid_creds", Codes.MISSING_PARAM - ) - assert_params_in_dict(threepid_creds, ["client_secret", "sid"]) - - sid = threepid_creds["sid"] - client_secret = threepid_creds["client_secret"] - assert_valid_client_secret(client_secret) - - validation_session = await self.identity_handler.validate_threepid_session( - client_secret, sid - ) - if validation_session: - await self.auth_handler.add_threepid( - user_id, - validation_session["medium"], - validation_session["address"], - validation_session["validated_at"], - ) - return 200, {} - - raise SynapseError( - 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED - ) - - -class ThreepidAddRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/add$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.identity_handler = hs.get_identity_handler() - self.auth = hs.get_auth() - self.auth_handler = hs.get_auth_handler() - - class PostBody(RequestBodyModel): - auth: Optional[AuthenticationData] = None - client_secret: ClientSecretStr - sid: StrictStr - - @interactive_auth_handler - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.registration.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - - requester = await self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - body = 
parse_and_validate_json_object_from_request(request, self.PostBody) - - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body.dict(exclude_unset=True), - "add a third-party identifier to your account", - ) - - validation_session = await self.identity_handler.validate_threepid_session( - body.client_secret, body.sid - ) - if validation_session: - await self.auth_handler.add_threepid( - user_id, - validation_session["medium"], - validation_session["address"], - validation_session["validated_at"], - ) - return 200, {} - - raise SynapseError( - 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED - ) - - -class ThreepidBindRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/bind$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.identity_handler = hs.get_identity_handler() - self.auth = hs.get_auth() - - class PostBody(RequestBodyModel): - client_secret: ClientSecretStr - id_access_token: StrictStr - id_server: StrictStr - sid: StrictStr - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - body = parse_and_validate_json_object_from_request(request, self.PostBody) - - requester = await self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - - await self.identity_handler.bind_threepid( - body.client_secret, body.sid, user_id, body.id_server, body.id_access_token - ) - - return 200, {} - - -class ThreepidUnbindRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/unbind$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.identity_handler = hs.get_identity_handler() - self.auth = hs.get_auth() - self.datastore = self.hs.get_datastores().main - - class PostBody(RequestBodyModel): - address: StrictStr - id_server: Optional[StrictStr] = None - medium: Literal["email", "msisdn"] - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - """Unbind the given 3pid from a specific identity server, or identity servers that are - known to have this 3pid bound - """ - requester = await self.auth.get_user_by_req(request) - body = parse_and_validate_json_object_from_request(request, self.PostBody) - - # Attempt to unbind the threepid from an identity server. If id_server is None, try to - # unbind from all identity servers this threepid has been added to in the past - result = await self.identity_handler.try_unbind_threepid( - requester.user.to_string(), body.medium, body.address, body.id_server - ) - return 200, {"id_server_unbind_result": "success" if result else "no-support"} - - -class ThreepidDeleteRestServlet(RestServlet): - PATTERNS = client_patterns("/account/3pid/delete$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.auth = hs.get_auth() - self.auth_handler = hs.get_auth_handler() - - class PostBody(RequestBodyModel): - address: StrictStr - id_server: Optional[StrictStr] = None - medium: Literal["email", "msisdn"] - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.registration.enable_3pid_changes: - raise SynapseError( - 400, "3PID changes are disabled on this server", Codes.FORBIDDEN - ) - - body = parse_and_validate_json_object_from_request(request, self.PostBody) - - requester = await self.auth.get_user_by_req(request) - user_id = requester.user.to_string() - - try: - # Attempt to remove any known bindings of this third-party ID - # and user ID from identity servers. 
- ret = await self.hs.get_identity_handler().try_unbind_threepid( - user_id, body.medium, body.address, body.id_server - ) - except Exception: - # NB. This endpoint should succeed if there is nothing to - # delete, so it should only throw if something is wrong - # that we ought to care about. - logger.exception("Failed to remove threepid") - raise SynapseError(500, "Failed to remove threepid") - - if ret: - id_server_unbind_result = "success" - else: - id_server_unbind_result = "no-support" - - # Delete the local association of this user ID and third-party ID. - await self.auth_handler.delete_local_threepid( - user_id, body.medium, body.address - ) - - return 200, {"id_server_unbind_result": id_server_unbind_result} - - def assert_valid_next_link(hs: "HomeServer", next_link: str) -> None: """ Raises a SynapseError if a given next_link value is invalid @@ -901,20 +311,8 @@ class AccountStatusRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: if not hs.config.experimental.msc3861.enabled: - EmailPasswordRequestTokenRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) - EmailThreepidRequestTokenRestServlet(hs).register(http_server) - MsisdnThreepidRequestTokenRestServlet(hs).register(http_server) - AddThreepidEmailSubmitTokenServlet(hs).register(http_server) - AddThreepidMsisdnSubmitTokenServlet(hs).register(http_server) - ThreepidRestServlet(hs).register(http_server) - if hs.config.worker.worker_app is None: - ThreepidBindRestServlet(hs).register(http_server) - ThreepidUnbindRestServlet(hs).register(http_server) - if not hs.config.experimental.msc3861.enabled: - ThreepidAddRestServlet(hs).register(http_server) - ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None and hs.config.experimental.msc3720_enabled: diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py
index 0ee24081fa..734c9e992f 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py
@@ -108,9 +108,9 @@ class AccountDataServlet(RestServlet): # Push rules are stored in a separate table and must be queried separately. if account_data_type == AccountDataTypes.PUSH_RULES: - account_data: Optional[JsonMapping] = ( - await self._push_rules_handler.push_rules_for_user(requester.user) - ) + account_data: Optional[ + JsonMapping + ] = await self._push_rules_handler.push_rules_for_user(requester.user) else: account_data = await self.store.get_global_account_data_by_type_for_user( user_id, account_data_type diff --git a/synapse/rest/client/account_validity.py b/synapse/rest/client/account_validity.py
index 6222a5cc37..ec7836b647 100644 --- a/synapse/rest/client/account_validity.py +++ b/synapse/rest/client/account_validity.py
@@ -48,9 +48,7 @@ class AccountValidityRenewServlet(RestServlet): self.account_renewed_template = ( hs.config.account_validity.account_validity_account_renewed_template ) - self.account_previously_renewed_template = ( - hs.config.account_validity.account_validity_account_previously_renewed_template - ) + self.account_previously_renewed_template = hs.config.account_validity.account_validity_account_previously_renewed_template self.invalid_token_template = ( hs.config.account_validity.account_validity_invalid_token_template ) diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py
index d6b4e32453..1f9662a95a 100644 --- a/synapse/rest/client/appservice_ping.py +++ b/synapse/rest/client/appservice_ping.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2023 Tulir Asokan -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023, 2025 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -53,6 +53,7 @@ class AppservicePingRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.as_api = hs.get_application_service_api() + self.scheduler = hs.get_application_service_scheduler() self.auth = hs.get_auth() async def on_POST( @@ -85,6 +86,10 @@ class AppservicePingRestServlet(RestServlet): start = time.monotonic() try: await self.as_api.ping(requester.app_service, txn_id) + + # We got an OK response, so if the AS needs to be recovered then let's recover it now. + # This sets off a task in the background and so is safe to fire and forget. + self.scheduler.txn_ctrl.force_retry(requester.app_service) except RequestTimedOutError as e: raise SynapseError( HTTPStatus.GATEWAY_TIMEOUT, diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py
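A minimal sketch of the recovery pattern the appservice_ping hunk adds: a successful ping means the AS is reachable again, so redelivery of queued transactions is kicked off without blocking the /ping response. `TxnController`, `ping_then_recover`, and the queue contents here are illustrative stand-ins, not the real scheduler API.

```python
import asyncio


class TxnController:
    """Hypothetical stand-in for the appservice transaction controller."""

    def __init__(self) -> None:
        self.queued = ["txn1", "txn2"]

    def force_retry(self, service: str) -> None:
        # Fire and forget: schedule the flush without awaiting it, so the
        # caller (the /ping handler) is not blocked on redelivery.
        asyncio.create_task(self._flush(service))

    async def _flush(self, service: str) -> None:
        while self.queued:
            print(f"redelivering {self.queued.pop(0)} to {service}")


async def ping_then_recover(txn_ctrl: TxnController, service: str) -> None:
    # Stands in for a successful round-trip of the ping request: once we
    # know the AS is up again, kick off redelivery of anything queued.
    await asyncio.sleep(0)
    txn_ctrl.force_retry(service)
    await asyncio.sleep(0.01)  # give the background task a chance to run


asyncio.run(ping_then_recover(TxnController(), "my_bridge"))
```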
index 4221f35937..b8dca7c797 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py
@@ -20,14 +20,14 @@ # import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, cast from twisted.web.server import Request from synapse.api.constants import LoginType from synapse.api.errors import LoginError, SynapseError from synapse.api.urls import CLIENT_API_PREFIX -from synapse.http.server import HttpServer, respond_with_html +from synapse.http.server import HttpServer, respond_with_html, respond_with_redirect from synapse.http.servlet import RestServlet, parse_string from synapse.http.site import SynapseRequest @@ -66,6 +66,23 @@ class AuthRestServlet(RestServlet): if not session: raise SynapseError(400, "No session supplied") + if ( + self.hs.config.experimental.msc3861.enabled + and stagetype == "org.matrix.cross_signing_reset" + ): + # If MSC3861 is enabled, we can assume self.auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self.auth) + + url = await auth.account_management_url() + if url is not None: + url = f"{url}?action=org.matrix.cross_signing_reset" + else: + url = await auth.issuer() + respond_with_redirect(request, str.encode(url)) + if stagetype == LoginType.RECAPTCHA: html = self.recaptcha_template.render( session=session, diff --git a/synapse/rest/client/auth_issuer.py b/synapse/rest/client/auth_metadata.py
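The URL-building fallback in the auth.py hunk is simple enough to isolate: prefer the auth server's advertised account-management UI with the reset action appended, otherwise fall back to the bare issuer. A minimal sketch (function name and example hosts are assumptions):

```python
from typing import Optional
from urllib.parse import urlencode


def cross_signing_reset_url(account_management_url: Optional[str], issuer: str) -> str:
    """Pick where to redirect a user who must approve a cross-signing reset."""
    if account_management_url is not None:
        # Append the reset action to the advertised account-management UI.
        query = urlencode({"action": "org.matrix.cross_signing_reset"})
        return f"{account_management_url}?{query}"
    # No management UI advertised: fall back to the issuer itself.
    return issuer


assert (
    cross_signing_reset_url("https://account.example.com", "https://auth.example.com")
    == "https://account.example.com?action=org.matrix.cross_signing_reset"
)
assert cross_signing_reset_url(None, "https://auth.example.com") == "https://auth.example.com"
```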
index 77b9720956..5444a89be6 100644 --- a/synapse/rest/client/auth_issuer.py +++ b/synapse/rest/client/auth_metadata.py
@@ -13,7 +13,7 @@ # limitations under the License. import logging import typing -from typing import Tuple +from typing import Tuple, cast from synapse.api.errors import Codes, SynapseError from synapse.http.server import HttpServer @@ -32,6 +32,8 @@ logger = logging.getLogger(__name__) class AuthIssuerServlet(RestServlet): """ Advertises what OpenID Connect issuer clients should use to authorise users. + This endpoint was defined in a previous iteration of MSC2965, and is still + used by some clients. """ PATTERNS = client_patterns( @@ -43,10 +45,16 @@ class AuthIssuerServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self._config = hs.config + self._auth = hs.get_auth() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if self._config.experimental.msc3861.enabled: - return 200, {"issuer": self._config.experimental.msc3861.issuer} + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + return 200, {"issuer": await auth.issuer()} else: # Wouldn't expect this to be reached: the servelet shouldn't have been # registered. Still, fail gracefully if we are registered for some reason. @@ -57,7 +65,42 @@ class AuthIssuerServlet(RestServlet): ) +class AuthMetadataServlet(RestServlet): + """ + Advertises the OAuth 2.0 server metadata for the homeserver. + """ + + PATTERNS = client_patterns( + "/org.matrix.msc2965/auth_metadata$", + unstable=True, + releases=(), + ) + + def __init__(self, hs: "HomeServer"): + super().__init__() + self._config = hs.config + self._auth = hs.get_auth() + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + if self._config.experimental.msc3861.enabled: + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + return 200, await auth.auth_metadata() + else: + # Wouldn't expect this to be reached: the servlet shouldn't have been + # registered. Still, fail gracefully if we are registered for some reason. + raise SynapseError( + 404, + "OIDC discovery has not been configured on this homeserver", + Codes.NOT_FOUND, + ) + + def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: # We use the MSC3861 values as they are used by multiple MSCs if hs.config.experimental.msc3861.enabled: AuthIssuerServlet(hs).register(http_server) + AuthMetadataServlet(hs).register(http_server) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
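From the client's side, the new auth_metadata endpoint supersedes auth_issuer but both remain served, so discovery can try the richer endpoint first. A hedged sketch of that fallback, using only the unstable MSC2965 paths visible in the hunk above (`discover_auth_server` is an illustrative helper, not part of any client SDK):

```python
import json
from urllib.request import urlopen


def discover_auth_server(homeserver: str) -> dict:
    """Fetch OAuth 2.0 server metadata from a homeserver, falling back to
    the older auth_issuer endpoint for servers that predate auth_metadata."""
    base = f"{homeserver}/_matrix/client/unstable/org.matrix.msc2965"
    try:
        with urlopen(f"{base}/auth_metadata") as resp:
            return json.load(resp)
    except Exception:
        # Older servers only advertise the issuer; the client must then
        # fetch the issuer's own well-known metadata itself.
        with urlopen(f"{base}/auth_issuer") as resp:
            return json.load(resp)


# e.g. discover_auth_server("https://matrix.example.com")
```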
index 63b8a9364a..caac5826a4 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py
@@ -21,7 +21,7 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Tuple -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet from synapse.http.site import SynapseRequest @@ -69,7 +69,7 @@ class CapabilitiesRestServlet(RestServlet): "enabled": self.config.registration.enable_set_avatar_url }, "m.3pid_changes": { - "enabled": self.config.registration.enable_3pid_changes + "enabled": False }, "m.get_login_token": { "enabled": self.config.auth.login_via_existing_enabled, @@ -77,11 +77,6 @@ class CapabilitiesRestServlet(RestServlet): } } - if self.config.experimental.msc3244_enabled: - response["capabilities"]["m.room_versions"][ - "org.matrix.msc3244.room_capabilities" - ] = MSC3244_CAPABILITIES - if self.config.experimental.msc3720_enabled: response["capabilities"]["org.matrix.msc3720.account_status"] = { "enabled": True, @@ -92,6 +87,23 @@ class CapabilitiesRestServlet(RestServlet): "enabled": self.config.experimental.msc3664_enabled, } + if self.config.experimental.msc4133_enabled: + response["capabilities"]["uk.tcpip.msc4133.profile_fields"] = { + "enabled": True, + } + + # Ensure this is consistent with the legacy m.set_displayname and + # m.set_avatar_url. + disallowed = [] + if not self.config.registration.enable_set_displayname: + disallowed.append("displayname") + if not self.config.registration.enable_set_avatar_url: + disallowed.append("avatar_url") + if disallowed: + response["capabilities"]["uk.tcpip.msc4133.profile_fields"][ + "disallowed" + ] = disallowed + return HTTPStatus.OK, response diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py new file mode 100644
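The MSC4133 capability logic in the capabilities.py hunk derives the `disallowed` list from the two legacy profile flags. A self-contained mirror of that computation (the function name is illustrative; the dict shape matches the hunk):

```python
def profile_fields_capability(
    enable_set_displayname: bool, enable_set_avatar_url: bool
) -> dict:
    # Keep the new capability consistent with the legacy m.set_displayname
    # and m.set_avatar_url capabilities: a locked legacy field becomes a
    # disallowed profile field rather than disabling custom fields wholesale.
    capability: dict = {"enabled": True}
    disallowed = []
    if not enable_set_displayname:
        disallowed.append("displayname")
    if not enable_set_avatar_url:
        disallowed.append("avatar_url")
    if disallowed:
        capability["disallowed"] = disallowed
    return capability


assert profile_fields_capability(True, True) == {"enabled": True}
assert profile_fields_capability(False, True) == {
    "enabled": True,
    "disallowed": ["displayname"],
}
```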
index 0000000000..2dd5a60b2b --- /dev/null +++ b/synapse/rest/client/delayed_events.py
@@ -0,0 +1,111 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +# This module contains REST servlets to do with delayed events: /delayed_events/<paths> + +import logging +from enum import Enum +from http import HTTPStatus +from typing import TYPE_CHECKING, Tuple + +from synapse.api.errors import Codes, SynapseError +from synapse.http.server import HttpServer +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseRequest +from synapse.rest.client._base import client_patterns +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class _UpdateDelayedEventAction(Enum): + CANCEL = "cancel" + RESTART = "restart" + SEND = "send" + + +class UpdateDelayedEventServlet(RestServlet): + PATTERNS = client_patterns( + r"/org\.matrix\.msc4140/delayed_events/(?P<delay_id>[^/]+)$", + releases=(), + ) + CATEGORY = "Delayed event management requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.delayed_events_handler = hs.get_delayed_events_handler() + + async def on_POST( + self, request: SynapseRequest, delay_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + body = parse_json_object_from_request(request) + try: + action = str(body["action"]) + except KeyError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "'action' is missing", + Codes.MISSING_PARAM, + ) + try: + enum_action = _UpdateDelayedEventAction(action) + except ValueError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "'action' is not one of " + + ", ".join(f"'{m.value}'" for m in _UpdateDelayedEventAction), + Codes.INVALID_PARAM, + ) + + if enum_action == _UpdateDelayedEventAction.CANCEL: + await self.delayed_events_handler.cancel(requester, delay_id) + elif enum_action == _UpdateDelayedEventAction.RESTART: + await self.delayed_events_handler.restart(requester, delay_id) + elif enum_action == _UpdateDelayedEventAction.SEND: + await self.delayed_events_handler.send(requester, delay_id) + return 200, {} + + +class DelayedEventsServlet(RestServlet): + PATTERNS = client_patterns( + r"/org\.matrix\.msc4140/delayed_events$", + releases=(), + ) + CATEGORY = "Delayed event management requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.delayed_events_handler = hs.get_delayed_events_handler() + + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + # TODO: Support Pagination stream API ("from" query parameter) + delayed_events = await self.delayed_events_handler.get_all_for_user(requester) + + ret = {"delayed_events": delayed_events} + return 200, ret + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + # The following can't currently be instantiated on workers. 
+ if hs.config.worker.worker_app is None: + UpdateDelayedEventServlet(hs).register(http_server) + DelayedEventsServlet(hs).register(http_server) diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
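The action parsing in the new delayed_events servlet is a tidy enum-validation pattern: a missing key and an unknown value get distinct error codes. A self-contained mirror of it (`UpdateAction` and `parse_action` are illustrative names; the accepted values match the hunk):

```python
from enum import Enum


class UpdateAction(Enum):
    CANCEL = "cancel"
    RESTART = "restart"
    SEND = "send"


def parse_action(body: dict) -> UpdateAction:
    """Mirror of the servlet's body validation: a missing or unknown
    "action" is a client error; anything else maps onto the enum."""
    try:
        raw = str(body["action"])
    except KeyError:
        raise ValueError("'action' is missing")
    try:
        return UpdateAction(raw)
    except ValueError:
        allowed = ", ".join(f"'{m.value}'" for m in UpdateAction)
        raise ValueError(f"'action' is not one of {allowed}")


assert parse_action({"action": "send"}) is UpdateAction.SEND
```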
index 8313d687b7..0b075cc2f2 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py
@@ -24,13 +24,7 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, List, Optional, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra, StrictStr -else: - from pydantic import Extra, StrictStr - +from synapse._pydantic_compat import Extra, StrictStr from synapse.api import errors from synapse.api.errors import NotFoundError, SynapseError, UnrecognizedRequestError from synapse.handlers.device import DeviceHandler @@ -120,15 +114,19 @@ class DeleteDevicesRestServlet(RestServlet): else: raise e - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body.dict(exclude_unset=True), - "remove device(s) from your account", - # Users might call this multiple times in a row while cleaning up - # devices, allow a single UI auth session to be re-used. - can_skip_ui_auth=True, - ) + if requester.app_service and requester.app_service.msc4190_device_management: + # MSC4190 can skip UIA for this endpoint + pass + else: + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body.dict(exclude_unset=True), + "remove device(s) from your account", + # Users might call this multiple times in a row while cleaning up + # devices, allow a single UI auth session to be re-used. + can_skip_ui_auth=True, + ) await self.device_handler.delete_devices( requester.user.to_string(), body.devices @@ -145,11 +143,11 @@ class DeviceRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() handler = hs.get_device_handler() - assert isinstance(handler, DeviceHandler) self.device_handler = handler self.auth_handler = hs.get_auth_handler() self._msc3852_enabled = hs.config.experimental.msc3852_enabled self._msc3861_oauth_delegation_enabled = hs.config.experimental.msc3861.enabled + self._is_main_process = hs.config.worker.worker_app is None async def on_GET( self, request: SynapseRequest, device_id: str @@ -181,8 +179,13 @@ class DeviceRestServlet(RestServlet): async def on_DELETE( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: - if self._msc3861_oauth_delegation_enabled: - raise UnrecognizedRequestError(code=404) + # Can only be run on main process, as changes to device lists must + # happen on main. + if not self._is_main_process: + error_message = "DELETE on /devices/ must be routed to main process" + logger.error(error_message) + raise SynapseError(500, error_message) + assert isinstance(self.device_handler, DeviceHandler) requester = await self.auth.get_user_by_req(request) @@ -198,15 +201,24 @@ class DeviceRestServlet(RestServlet): else: raise - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body.dict(exclude_unset=True), - "remove a device from your account", - # Users might call this multiple times in a row while cleaning up - # devices, allow a single UI auth session to be re-used. - can_skip_ui_auth=True, - ) + if requester.app_service and requester.app_service.msc4190_device_management: + # MSC4190 allows appservices to delete devices through this endpoint without UIA + # It's also allowed with MSC3861 enabled + pass + + else: + if self._msc3861_oauth_delegation_enabled: + raise UnrecognizedRequestError(code=404) + + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body.dict(exclude_unset=True), + "remove a device from your account", + # Users might call this multiple times in a row while cleaning up + # devices, allow a single UI auth session to be re-used. 
+ can_skip_ui_auth=True, + ) await self.device_handler.delete_devices( requester.user.to_string(), [device_id] @@ -219,9 +231,27 @@ class DeviceRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: + # Can only be run on main process, as changes to device lists must + # happen on main. + if not self._is_main_process: + error_message = "PUT on /devices/ must be routed to main process" + logger.error(error_message) + raise SynapseError(500, error_message) + assert isinstance(self.device_handler, DeviceHandler) + requester = await self.auth.get_user_by_req(request, allow_guest=True) body = parse_and_validate_json_object_from_request(request, self.PutBody) + + # MSC4190 allows appservices to create devices through this endpoint + if requester.app_service and requester.app_service.msc4190_device_management: + created = await self.device_handler.upsert_device( + user_id=requester.user.to_string(), + device_id=device_id, + display_name=body.display_name, + ) + return 201 if created else 200, {} + await self.device_handler.update_device( requester.user.to_string(), device_id, body.dict() ) @@ -571,9 +601,9 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) + DeviceRestServlet(hs).register(http_server) if hs.config.worker.worker_app is None: - DeviceRestServlet(hs).register(http_server) if hs.config.experimental.msc2697_enabled: DehydratedDeviceServlet(hs).register(http_server) ClaimDehydratedDeviceServlet(hs).register(http_server) diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py
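The MSC4190 path in the devices.py hunk gives PUT /devices/{deviceId} upsert semantics for appservices: 201 when the device was created, 200 when an existing one was updated. A toy sketch of that contract (`InMemoryDeviceStore` and the demo IDs are hypothetical):

```python
from typing import Dict, Optional, Tuple


class InMemoryDeviceStore:
    """Toy stand-in for the device handler's upsert behaviour."""

    def __init__(self) -> None:
        self._devices: Dict[Tuple[str, str], Optional[str]] = {}

    def upsert_device(
        self, user_id: str, device_id: str, display_name: Optional[str]
    ) -> bool:
        # Report whether the device was newly created.
        created = (user_id, device_id) not in self._devices
        self._devices[(user_id, device_id)] = display_name
        return created


def put_device_status(store: InMemoryDeviceStore, user: str, device: str) -> int:
    # Mirrors the servlet above: 201 for a new device, 200 for an update.
    return 201 if store.upsert_device(user, device, "Bridge device") else 200


store = InMemoryDeviceStore()
assert put_device_status(store, "@as_user:example.com", "DEV1") == 201
assert put_device_status(store, "@as_user:example.com", "DEV1") == 200
```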
index 11fdd0f7c6..479f489623 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py
@@ -20,19 +20,11 @@ # import logging -from typing import TYPE_CHECKING, List, Optional, Tuple - -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictStr -else: - from pydantic import StrictStr - -from typing_extensions import Literal +from typing import TYPE_CHECKING, List, Literal, Optional, Tuple from twisted.web.server import Request +from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py
index 613890061e..ad23cc76ce 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py
@@ -20,6 +20,7 @@ # """This module contains REST servlets to do with event streaming, /events.""" + import logging from typing import TYPE_CHECKING, Dict, List, Tuple, Union diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py
index eddad7d5b8..7025662fdc 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py
@@ -23,10 +23,13 @@ import logging import re from collections import Counter -from http import HTTPStatus -from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, cast -from synapse.api.errors import Codes, InvalidAPICallError, SynapseError +from synapse.api.errors import ( + InteractiveAuthIncompleteError, + InvalidAPICallError, + SynapseError, +) from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -403,17 +406,36 @@ class SigningKeyUploadServlet(RestServlet): # explicitly mark the master key as replaceable. if self.hs.config.experimental.msc3861.enabled: if not master_key_updatable_without_uia: - config = self.hs.config.experimental.msc3861 - if config.account_management_url is not None: - url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset" + # If MSC3861 is enabled, we can assume self.auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self.auth) + + uri = await auth.account_management_url() + if uri is not None: + url = f"{uri}?action=org.matrix.cross_signing_reset" else: - url = config.issuer + url = await auth.issuer() - raise SynapseError( - HTTPStatus.NOT_IMPLEMENTED, - "To reset your end-to-end encryption cross-signing identity, " - f"you first need to approve it at {url} and then try again.", - Codes.UNRECOGNIZED, + # We use a dummy session ID as this isn't really a UIA flow, but we + # reuse the same API shape for better client compatibility. + raise InteractiveAuthIncompleteError( + "dummy", + { + "session": "dummy", + "flows": [ + {"stages": ["org.matrix.cross_signing_reset"]}, + ], + "params": { + "org.matrix.cross_signing_reset": { + "url": url, + }, + }, + "msg": "To reset your end-to-end encryption cross-signing " + f"identity, you first need to approve it at {url} and " + "then try again.", + }, ) else: # Without MSC3861, we require UIA. diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py
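The keys.py hunk swaps a plain 501 for a UIA-shaped 401 body so existing clients can surface the approval URL through their usual interactive-auth plumbing. A sketch of how a client would pull the URL back out of that body (the helper name is illustrative; the body shape matches the hunk):

```python
def extract_reset_url(uia_body: dict) -> str:
    """Pull the approval URL out of the UIA-shaped cross-signing reset
    response produced by the hunk above."""
    stages = uia_body["flows"][0]["stages"]
    assert "org.matrix.cross_signing_reset" in stages
    return uia_body["params"]["org.matrix.cross_signing_reset"]["url"]


body = {
    "session": "dummy",
    "flows": [{"stages": ["org.matrix.cross_signing_reset"]}],
    "params": {
        "org.matrix.cross_signing_reset": {
            "url": "https://account.example.com?action=org.matrix.cross_signing_reset"
        }
    },
}
assert extract_reset_url(body).startswith("https://account.example.com")
```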
index e31687fc13..d7a17e1b35 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py
@@ -53,7 +53,6 @@ class KnockRoomAliasServlet(RestServlet): super().__init__() self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() - self._support_via = hs.config.experimental.msc4156_enabled async def on_POST( self, @@ -72,15 +71,11 @@ class KnockRoomAliasServlet(RestServlet): # twisted.web.server.Request.args is incorrectly defined as Optional[Any] args: Dict[bytes, List[bytes]] = request.args # type: ignore - remote_room_hosts = parse_strings_from_args( args, "server_name", required=False ) - if self._support_via: + # Prefer via over server_name, which MSC4156 deprecates + remote_room_hosts = parse_strings_from_args(args, "via", required=False) + if remote_room_hosts is None: remote_room_hosts = parse_strings_from_args( - args, - "org.matrix.msc4156.via", - default=remote_room_hosts, - required=False, + args, "server_name", required=False ) elif RoomAlias.is_valid(room_identifier): handler = self.room_member_handler diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py
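The knock.py hunk reduces to a precedence rule over query parameters: use `via` when present, and only consult the deprecated `server_name` when no `via` values were supplied. A minimal sketch of that rule (the helper name is illustrative):

```python
from typing import Dict, List, Optional


def pick_remote_hosts(args: Dict[bytes, List[bytes]]) -> Optional[List[str]]:
    # Prefer the stable `via` parameter; only fall back to the deprecated
    # `server_name` when no `via` values were supplied at all.
    for key in (b"via", b"server_name"):
        if key in args:
            return [v.decode("ascii") for v in args[key]]
    return None


assert pick_remote_hosts(
    {b"via": [b"one.example"], b"server_name": [b"two.example"]}
) == ["one.example"]
assert pick_remote_hosts({b"server_name": [b"two.example"]}) == ["two.example"]
assert pick_remote_hosts({}) is None
```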
index ae691bcdba..cc6863cadc 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py
@@ -30,11 +30,10 @@ from typing import ( List, Optional, Tuple, + TypedDict, Union, ) -from typing_extensions import TypedDict - from synapse.api.constants import ApprovalNoticeMedium from synapse.api.errors import ( Codes, @@ -82,7 +81,6 @@ class LoginRestServlet(RestServlet): PATTERNS = client_patterns("/login$", v1=True) CATEGORY = "Registration/login requests" - CAS_TYPE = "m.login.cas" SSO_TYPE = "m.login.sso" TOKEN_TYPE = "m.login.token" JWT_TYPE = "org.matrix.login.jwt" @@ -98,8 +96,6 @@ class LoginRestServlet(RestServlet): self.jwt_enabled = hs.config.jwt.jwt_enabled # SSO configuration. - self.saml2_enabled = hs.config.saml2.saml2_enabled - self.cas_enabled = hs.config.cas.cas_enabled self.oidc_enabled = hs.config.oidc.oidc_enabled self._refresh_tokens_enabled = ( hs.config.registration.refreshable_access_token_lifetime is not None @@ -136,7 +132,7 @@ class LoginRestServlet(RestServlet): cfg=self.hs.config.ratelimiting.rc_login_account, ) - # ensure the CAS/SAML/OIDC handlers are loaded on this worker instance. + # ensure the OIDC handlers are loaded on this worker instance. # The reason for this is to ensure that the auth_provider_ids are registered # with SsoHandler, which in turn ensures that the login/registration prometheus # counters are initialised for the auth_provider_ids. @@ -147,15 +143,10 @@ class LoginRestServlet(RestServlet): if self.jwt_enabled: flows.append({"type": LoginRestServlet.JWT_TYPE}) - if self.cas_enabled: - # we advertise CAS for backwards compat, though MSC1721 renamed it - # to SSO. - flows.append({"type": LoginRestServlet.CAS_TYPE}) - # The login token flow requires m.login.token to be advertised. support_login_token_flow = self._get_login_token_enabled - if self.cas_enabled or self.saml2_enabled or self.oidc_enabled: + if self.oidc_enabled: flows.append( { "type": LoginRestServlet.SSO_TYPE, @@ -268,7 +259,7 @@ class LoginRestServlet(RestServlet): approval_notice_medium=ApprovalNoticeMedium.NONE, ) - well_known_data = self._well_known_builder.get_well_known() + well_known_data = await self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data return 200, result @@ -325,7 +316,7 @@ class LoginRestServlet(RestServlet): *, request_info: RequestInfo, ) -> LoginResponse: - """Handle non-token/saml/jwt logins + """Handle non-token/jwt logins Args: login_submission: @@ -363,6 +354,7 @@ class LoginRestServlet(RestServlet): login_submission: JsonDict, callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None, create_non_existent_users: bool = False, + default_display_name: Optional[str] = None, ratelimit: bool = True, auth_provider_id: Optional[str] = None, should_issue_refresh_token: bool = False, @@ -410,7 +402,8 @@ class LoginRestServlet(RestServlet): canonical_uid = await self.auth_handler.check_user_exists(user_id) if not canonical_uid: canonical_uid = await self.registration_handler.register_user( - localpart=UserID.from_string(user_id).localpart + localpart=UserID.from_string(user_id).localpart, + default_display_name=default_display_name, ) user_id = canonical_uid @@ -546,11 +539,14 @@ class LoginRestServlet(RestServlet): Returns: The body of the JSON response. 
""" - user_id = self.hs.get_jwt_handler().validate_login(login_submission) + user_id, default_display_name = self.hs.get_jwt_handler().validate_login( + login_submission + ) return await self._complete_login( user_id, login_submission, create_non_existent_users=True, + default_display_name=default_display_name, should_issue_refresh_token=should_issue_refresh_token, request_info=request_info, ) @@ -622,7 +618,7 @@ class RefreshTokenServlet(RestServlet): class SsoRedirectServlet(RestServlet): - PATTERNS = list(client_patterns("/login/(cas|sso)/redirect$", v1=True)) + [ + PATTERNS = list(client_patterns("/login/sso/redirect$", v1=True)) + [ re.compile( "^" + CLIENT_API_PREFIX @@ -679,31 +675,6 @@ class SsoRedirectServlet(RestServlet): finish_request(request) -class CasTicketServlet(RestServlet): - PATTERNS = client_patterns("/login/cas/ticket", v1=True) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self._cas_handler = hs.get_cas_handler() - - async def on_GET(self, request: SynapseRequest) -> None: - client_redirect_url = parse_string(request, "redirectUrl") - ticket = parse_string(request, "ticket", required=True) - - # Maybe get a session ID (if this ticket is from user interactive - # authentication). - session = parse_string(request, "session") - - # Either client_redirect_url or session must be provided. - if not client_redirect_url and not session: - message = "Missing string query parameter redirectUrl or session" - raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) - - await self._cas_handler.handle_ticket( - request, ticket, client_redirect_url, session - ) - - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.experimental.msc3861.enabled: return @@ -715,26 +686,18 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ): RefreshTokenServlet(hs).register(http_server) if ( - hs.config.cas.cas_enabled - or hs.config.saml2.saml2_enabled - or hs.config.oidc.oidc_enabled + hs.config.oidc.oidc_enabled ): SsoRedirectServlet(hs).register(http_server) - if hs.config.cas.cas_enabled: - CasTicketServlet(hs).register(http_server) def _load_sso_handlers(hs: "HomeServer") -> None: """Ensure that the SSO handlers are loaded, if they are enabled by configuration. - This is mostly useful to ensure that the CAS/SAML/OIDC handlers register themselves + This is mostly useful to ensure that the OIDC handler registers itself with the main SsoHandler. It's safe to call this multiple times. """ - if hs.config.cas.cas_enabled: - hs.get_cas_handler() - if hs.config.saml2.saml2_enabled: - hs.get_saml_handler() if hs.config.oidc.oidc_enabled: hs.get_oidc_handler() diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py
index c30e3022de..4c044ae900 100644 --- a/synapse/rest/client/media.py +++ b/synapse/rest/client/media.py
@@ -102,10 +102,17 @@ class MediaConfigResource(RestServlet): self.clock = hs.get_clock() self.auth = hs.get_auth() self.limits_dict = {"m.upload.size": config.media.max_upload_size} + self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository async def on_GET(self, request: SynapseRequest) -> None: - await self.auth.get_user_by_req(request) - respond_with_json(request, 200, self.limits_dict, send_cors=True) + requester = await self.auth.get_user_by_req(request) + user_specific_config = ( + await self.media_repository_callbacks.get_media_config_for_user( + requester.user.to_string(), + ) + ) + response = user_specific_config if user_specific_config else self.limits_dict + respond_with_json(request, 200, response, send_cors=True) class ThumbnailResource(RestServlet): @@ -138,7 +145,7 @@ class ThumbnailResource(RestServlet): ) -> None: # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) - await self.auth.get_user_by_req(request) + await self.auth.get_user_by_req(request, allow_guest=True) set_cors_headers(request) set_corp_headers(request) @@ -229,7 +236,7 @@ class DownloadResource(RestServlet): # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) - await self.auth.get_user_by_req(request) + await self.auth.get_user_by_req(request, allow_guest=True) set_cors_headers(request) set_corp_headers(request) diff --git a/synapse/rest/client/presence.py b/synapse/rest/client/presence.py
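The media.py hunk lets a module override the /config response per user: the callback returning None keeps the server-wide limits, while a dict replaces them outright. A hedged sketch of such a callback under that contract (the staff-user rule and sizes are invented for illustration):

```python
import asyncio
from typing import Optional

# Server-wide default, mirroring the limits_dict in the hunk above.
DEFAULT_LIMITS = {"m.upload.size": 50 * 1024 * 1024}


async def get_media_config_for_user(user_id: str) -> Optional[dict]:
    """Hypothetical module callback: larger upload cap for staff users,
    fall through (None) for everyone else."""
    if user_id.startswith("@staff_"):
        return {"m.upload.size": 500 * 1024 * 1024}
    return None


async def media_config_response(user_id: str) -> dict:
    # Mirrors the servlet: a user-specific config wins, else the defaults.
    user_specific = await get_media_config_for_user(user_id)
    return user_specific if user_specific else DEFAULT_LIMITS


assert asyncio.run(media_config_response("@bob:example.com")) == DEFAULT_LIMITS
```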
index 572e92642c..104d54cd89 100644 --- a/synapse/rest/client/presence.py +++ b/synapse/rest/client/presence.py
@@ -19,12 +19,13 @@ # # -""" This module contains REST servlets to do with presence: /presence/<paths> -""" +"""This module contains REST servlets to do with presence: /presence/<paths>""" + import logging from typing import TYPE_CHECKING, Tuple -from synapse.api.errors import AuthError, SynapseError +from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError +from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.presence import format_user_presence_state from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -48,6 +49,14 @@ class PresenceStatusRestServlet(RestServlet): self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() self.auth = hs.get_auth() + self.store = hs.get_datastores().main + + # Ratelimiter for presence updates, keyed by requester. + self._presence_per_user_limiter = Ratelimiter( + store=self.store, + clock=self.clock, + cfg=hs.config.ratelimiting.rc_presence_per_user, + ) async def on_GET( self, request: SynapseRequest, user_id: str @@ -82,6 +91,17 @@ class PresenceStatusRestServlet(RestServlet): if requester.user != user: raise AuthError(403, "Can only set your own presence state") + # ignore the presence update if the ratelimit is exceeded + try: + await self._presence_per_user_limiter.ratelimit(requester) + except LimitExceededError as e: + logger.debug("User presence ratelimit exceeded; ignoring it.") + return 429, { + "errcode": Codes.LIMIT_EXCEEDED, + "error": "Too many requests", + "retry_after_ms": e.retry_after_ms, + } + state = {} content = parse_json_object_from_request(request) diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
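The presence.py hunk drops over-limit updates and hands the client a retry hint rather than applying them. A toy token-bucket limiter showing that drop-and-429 shape (`PerUserLimiter` and its numbers are illustrative, not Synapse's `Ratelimiter`):

```python
import time
from typing import Dict, Optional, Tuple


class PerUserLimiter:
    """Toy token-bucket limiter standing in for rc_presence_per_user."""

    def __init__(self, per_second: float, burst: int) -> None:
        self.per_second = per_second
        self.burst = burst
        self._buckets: Dict[str, Tuple[float, float]] = {}

    def try_acquire(self, user_id: str, now: Optional[float] = None) -> Optional[int]:
        """Return None if the update is allowed, else a retry_after_ms hint."""
        now = time.monotonic() if now is None else now
        tokens, last = self._buckets.get(user_id, (float(self.burst), now))
        tokens = min(self.burst, tokens + (now - last) * self.per_second)
        if tokens >= 1.0:
            self._buckets[user_id] = (tokens - 1.0, now)
            return None
        self._buckets[user_id] = (tokens, now)
        return int((1.0 - tokens) / self.per_second * 1000)


limiter = PerUserLimiter(per_second=0.1, burst=1)
assert limiter.try_acquire("@alice:example.com", now=0.0) is None
retry_ms = limiter.try_acquire("@alice:example.com", now=0.0)
assert retry_ms is not None and retry_ms > 0  # caller would respond 429
```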
index c1a80c5c3d..8326d8017c 100644 --- a/synapse/rest/client/profile.py +++ b/synapse/rest/client/profile.py
@@ -19,12 +19,15 @@ # # -""" This module contains REST servlets to do with profile: /profile/<paths> """ +"""This module contains REST servlets to do with profile: /profile/<paths>""" +import re from http import HTTPStatus from typing import TYPE_CHECKING, Tuple +from synapse.api.constants import ProfileFields from synapse.api.errors import Codes, SynapseError +from synapse.handlers.profile import MAX_CUSTOM_FIELD_LEN from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -33,7 +36,8 @@ from synapse.http.servlet import ( ) from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, JsonValue, UserID +from synapse.util.stringutils import is_namedspaced_grammar if TYPE_CHECKING: from synapse.server import HomeServer @@ -91,6 +95,11 @@ class ProfileDisplaynameRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + requester = await self.auth.get_user_by_req(request, allow_guest=True) user = UserID.from_string(user_id) is_admin = await self.auth.is_server_admin(requester) @@ -101,9 +110,7 @@ class ProfileDisplaynameRestServlet(RestServlet): new_name = content["displayname"] except Exception: raise SynapseError( - code=400, - msg="Unable to parse name", - errcode=Codes.BAD_JSON, + 400, "Missing key 'displayname'", errcode=Codes.MISSING_PARAM ) propagate = _read_propagate(self.hs, request) @@ -166,6 +173,11 @@ class ProfileAvatarURLRestServlet(RestServlet): async def on_PUT( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) is_admin = await self.auth.is_server_admin(requester) @@ -227,19 +239,185 @@ class ProfileRestServlet(RestServlet): user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) - displayname = await self.profile_handler.get_displayname(user) - avatar_url = await self.profile_handler.get_avatar_url(user) - - ret = {} - if displayname is not None: - ret["displayname"] = displayname - if avatar_url is not None: - ret["avatar_url"] = avatar_url + ret = await self.profile_handler.get_profile(user_id) return 200, ret +class UnstableProfileFieldRestServlet(RestServlet): + PATTERNS = [ + re.compile( + r"^/_matrix/client/unstable/uk\.tcpip\.msc4133/profile/(?P<user_id>[^/]*)/(?P<field_name>[^/]*)" + ) + ] + CATEGORY = "Event sending requests" + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.hs = hs + self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() + + async def on_GET( + self, request: SynapseRequest, user_id: str, field_name: str + ) -> Tuple[int, JsonDict]: + requester_user = None + + if self.hs.config.server.require_auth_for_profile_requests: + requester = await self.auth.get_user_by_req(request) + requester_user = requester.user + + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + + if not field_name: + raise SynapseError(400, "Field name too short", errcode=Codes.INVALID_PARAM) + + if len(field_name.encode("utf-8")) > 
MAX_CUSTOM_FIELD_LEN: + raise SynapseError(400, "Field name too long", errcode=Codes.KEY_TOO_LARGE) + if not is_namedspaced_grammar(field_name): + raise SynapseError( + 400, + "Field name does not follow Common Namespaced Identifier Grammar", + errcode=Codes.INVALID_PARAM, + ) + + user = UserID.from_string(user_id) + await self.profile_handler.check_profile_query_allowed(user, requester_user) + + if field_name == ProfileFields.DISPLAYNAME: + field_value: JsonValue = await self.profile_handler.get_displayname(user) + elif field_name == ProfileFields.AVATAR_URL: + field_value = await self.profile_handler.get_avatar_url(user) + else: + field_value = await self.profile_handler.get_profile_field(user, field_name) + + return 200, {field_name: field_value} + + async def on_PUT( + self, request: SynapseRequest, user_id: str, field_name: str + ) -> Tuple[int, JsonDict]: + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + + requester = await self.auth.get_user_by_req(request) + user = UserID.from_string(user_id) + is_admin = await self.auth.is_server_admin(requester) + + if not field_name: + raise SynapseError(400, "Field name too short", errcode=Codes.INVALID_PARAM) + + if len(field_name.encode("utf-8")) > MAX_CUSTOM_FIELD_LEN: + raise SynapseError(400, "Field name too long", errcode=Codes.KEY_TOO_LARGE) + if not is_namedspaced_grammar(field_name): + raise SynapseError( + 400, + "Field name does not follow Common Namespaced Identifier Grammar", + errcode=Codes.INVALID_PARAM, + ) + + content = parse_json_object_from_request(request) + try: + new_value = content[field_name] + except KeyError: + raise SynapseError( + 400, f"Missing key '{field_name}'", errcode=Codes.MISSING_PARAM + ) + + propagate = _read_propagate(self.hs, request) + + requester_suspended = ( + await self.hs.get_datastores().main.get_user_suspended_status( + requester.user.to_string() + ) + ) + + if requester_suspended: + raise SynapseError( + 403, + "Updating profile while account is suspended is not allowed.", + Codes.USER_ACCOUNT_SUSPENDED, + ) + + if field_name == ProfileFields.DISPLAYNAME: + await self.profile_handler.set_displayname( + user, requester, new_value, is_admin, propagate=propagate + ) + elif field_name == ProfileFields.AVATAR_URL: + await self.profile_handler.set_avatar_url( + user, requester, new_value, is_admin, propagate=propagate + ) + else: + await self.profile_handler.set_profile_field( + user, requester, field_name, new_value, is_admin + ) + + return 200, {} + + async def on_DELETE( + self, request: SynapseRequest, user_id: str, field_name: str + ) -> Tuple[int, JsonDict]: + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + + requester = await self.auth.get_user_by_req(request) + user = UserID.from_string(user_id) + is_admin = await self.auth.is_server_admin(requester) + + if not field_name: + raise SynapseError(400, "Field name too short", errcode=Codes.INVALID_PARAM) + + if len(field_name.encode("utf-8")) > MAX_CUSTOM_FIELD_LEN: + raise SynapseError(400, "Field name too long", errcode=Codes.KEY_TOO_LARGE) + if not is_namedspaced_grammar(field_name): + raise SynapseError( + 400, + "Field name does not follow Common Namespaced Identifier Grammar", + errcode=Codes.INVALID_PARAM, + ) + + propagate = _read_propagate(self.hs, request) + + requester_suspended = ( + await self.hs.get_datastores().main.get_user_suspended_status( + requester.user.to_string() + ) + ) + 
+ if requester_suspended: + raise SynapseError( + 403, + "Updating profile while account is suspended is not allowed.", + Codes.USER_ACCOUNT_SUSPENDED, + ) + + if field_name == ProfileFields.DISPLAYNAME: + await self.profile_handler.set_displayname( + user, requester, "", is_admin, propagate=propagate + ) + elif field_name == ProfileFields.AVATAR_URL: + await self.profile_handler.set_avatar_url( + user, requester, "", is_admin, propagate=propagate + ) + else: + await self.profile_handler.delete_profile_field( + user, requester, field_name, is_admin + ) + + return 200, {} + + def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + # The specific displayname / avatar URL / custom field endpoints *must* appear + # before their corresponding generic profile endpoint. ProfileDisplaynameRestServlet(hs).register(http_server) ProfileAvatarURLRestServlet(hs).register(http_server) ProfileRestServlet(hs).register(http_server) + if hs.config.experimental.msc4133_enabled: + UnstableProfileFieldRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py
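Each of the new MSC4133 field servlets applies the same three checks to the field name before dispatching: non-empty, within the byte-length cap, and conforming to the Common Namespaced Identifier Grammar. A hedged mirror of that validation; the 128-byte limit and the regex are assumptions standing in for `MAX_CUSTOM_FIELD_LEN` and `is_namedspaced_grammar`:

```python
import re

MAX_FIELD_LEN = 128  # assumed value of MAX_CUSTOM_FIELD_LEN

# Rough approximation of the namespaced grammar: a lowercase first
# character, then lowercase alphanumerics, dots, hyphens, underscores.
_NAMESPACED = re.compile(r"^[a-z][a-z0-9_.\-]*$")


def validate_field_name(field_name: str) -> None:
    if not field_name:
        raise ValueError("Field name too short")
    if len(field_name.encode("utf-8")) > MAX_FIELD_LEN:
        raise ValueError("Field name too long")
    if not _NAMESPACED.match(field_name):
        raise ValueError("Field name does not follow the namespaced grammar")


validate_field_name("uk.tcpip.msc4133.example_field")
```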
index a455f95a26..2463b3b38c 100644 --- a/synapse/rest/client/pusher.py +++ b/synapse/rest/client/pusher.py
@@ -34,7 +34,6 @@ from synapse.http.site import SynapseRequest from synapse.push import PusherConfigException from synapse.rest.admin.experimental_features import ExperimentalFeature from synapse.rest.client._base import client_patterns -from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource from synapse.types import JsonDict if TYPE_CHECKING: @@ -161,21 +160,6 @@ class PushersSetRestServlet(RestServlet): return 200, {} -class LegacyPushersRemoveRestServlet(UnsubscribeResource, RestServlet): - """ - A servlet to handle legacy "email unsubscribe" links, forwarding requests to the ``UnsubscribeResource`` - - This should be kept for some time, so unsubscribe links in past emails stay valid. - """ - - PATTERNS = client_patterns("/pushers/remove$", releases=[], v1=False, unstable=True) - - async def on_GET(self, request: SynapseRequest) -> None: - # Forward the request to the UnsubscribeResource - await self._async_render(request) - - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: PushersRestServlet(hs).register(http_server) PushersSetRestServlet(hs).register(http_server) - LegacyPushersRemoveRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py
index 89203dc45a..4bf93f485c 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py
@@ -39,9 +39,7 @@ logger = logging.getLogger(__name__) class ReceiptRestServlet(RestServlet): PATTERNS = client_patterns( - "/rooms/(?P<room_id>[^/]*)" - "/receipt/(?P<receipt_type>[^/]*)" - "/(?P<event_id>[^/]*)$" + "/rooms/(?P<room_id>[^/]*)/receipt/(?P<receipt_type>[^/]*)/(?P<event_id>[^/]*)$" ) CATEGORY = "Receipts requests" diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index 5dddbc69be..9d18d8ba25 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py
@@ -38,14 +38,12 @@ from synapse.api.errors import ( InteractiveAuthIncompleteError, NotApprovedError, SynapseError, - ThreepidValidationError, UnrecognizedRequestError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.config import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.ratelimiting import FederationRatelimitSettings -from synapse.config.server import is_threepid_reserved from synapse.handlers.auth import AuthHandler from synapse.handlers.ui_auth import UIAuthSessionDataConstants from synapse.http.server import HttpServer, finish_request, respond_with_html @@ -56,17 +54,9 @@ from synapse.http.servlet import ( parse_string, ) from synapse.http.site import SynapseRequest -from synapse.metrics import threepid_send_requests -from synapse.push.mailer import Mailer from synapse.types import JsonDict -from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import assert_valid_client_secret, random_string -from synapse.util.threepids import ( - canonicalise_email, - check_3pid_allowed, - validate_email, -) from ._base import client_patterns, interactive_auth_handler @@ -76,247 +66,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class EmailRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_patterns("/register/email/requestToken$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.identity_handler = hs.get_identity_handler() - self.config = hs.config - - if self.hs.config.email.can_verify_email: - self.registration_mailer = Mailer( - hs=self.hs, - app_name=self.config.email.email_app_name, - template_html=self.config.email.email_registration_template_html, - template_text=self.config.email.email_registration_template_text, - ) - self.already_in_use_mailer = Mailer( - hs=self.hs, - app_name=self.config.email.email_app_name, - template_html=self.config.email.email_already_in_use_template_html, - template_text=self.config.email.email_already_in_use_template_text, - ) - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - if not self.hs.config.email.can_verify_email: - logger.warning( - "Email registration has been disabled due to lack of email config" - ) - raise SynapseError( - 400, "Email-based registration has been disabled on this server" - ) - body = parse_json_object_from_request(request) - - assert_params_in_dict(body, ["client_secret", "email", "send_attempt"]) - - # Extract params from body - client_secret = body["client_secret"] - assert_valid_client_secret(client_secret) - - # For emails, canonicalise the address. - # We store all email addresses canonicalised in the DB. 
- # (See on_POST in EmailThreepidRequestTokenRestServlet - # in synapse/rest/client/account.py) - try: - email = validate_email(body["email"]) - except ValueError as e: - raise SynapseError(400, str(e)) - send_attempt = body["send_attempt"] - next_link = body.get("next_link") # Optional param - - if not await check_3pid_allowed(self.hs, "email", email, registration=True): - raise SynapseError( - 403, - "Your email domain is not authorized to register on this server", - Codes.THREEPID_DENIED, - ) - - await self.identity_handler.ratelimit_request_token_requests( - request, "email", email - ) - - existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( - "email", email - ) - - if existing_user_id is not None: - if self.hs.config.server.request_token_inhibit_3pid_errors: - # Make the client think the operation succeeded. See the rationale in the - # comments for request_token_inhibit_3pid_errors. - # Still send an email to warn the user that an account already exists. - # Also wait for some random amount of time between 100ms and 1s to make it - # look like we did something. - await self.already_in_use_mailer.send_already_in_use_mail(email) - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) - return 200, {"sid": random_string(16)} - - raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) - - # Send registration emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.registration_mailer.send_registration_mail, - next_link, - ) - - threepid_send_requests.labels(type="email", reason="register").observe( - send_attempt - ) - - # Wrap the session id in a JSON object - return 200, {"sid": sid} - - -class MsisdnRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_patterns("/register/msisdn/requestToken$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.identity_handler = hs.get_identity_handler() - - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - body = parse_json_object_from_request(request) - - assert_params_in_dict( - body, ["client_secret", "country", "phone_number", "send_attempt"] - ) - client_secret = body["client_secret"] - assert_valid_client_secret(client_secret) - country = body["country"] - phone_number = body["phone_number"] - send_attempt = body["send_attempt"] - next_link = body.get("next_link") # Optional param - - msisdn = phone_number_to_msisdn(country, phone_number) - - if not await check_3pid_allowed(self.hs, "msisdn", msisdn, registration=True): - raise SynapseError( - 403, - "Phone numbers are not authorized to register on this server", - Codes.THREEPID_DENIED, - ) - - await self.identity_handler.ratelimit_request_token_requests( - request, "msisdn", msisdn - ) - - existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( - "msisdn", msisdn - ) - - if existing_user_id is not None: - if self.hs.config.server.request_token_inhibit_3pid_errors: - # Make the client think the operation succeeded. See the rationale in the - # comments for request_token_inhibit_3pid_errors. - # Also wait for some random amount of time between 100ms and 1s to make it - # look like we did something. 
- await self.hs.get_clock().sleep(random.randint(1, 10) / 10) - return 200, {"sid": random_string(16)} - - raise SynapseError( - 400, "Phone number is already in use", Codes.THREEPID_IN_USE - ) - - if not self.hs.config.registration.account_threepid_delegate_msisdn: - logger.warning( - "No upstream msisdn account_threepid_delegate configured on the server to " - "handle this request" - ) - raise SynapseError( - 400, "Registration by phone number is not supported on this homeserver" - ) - - ret = await self.identity_handler.requestMsisdnToken( - self.hs.config.registration.account_threepid_delegate_msisdn, - country, - phone_number, - client_secret, - send_attempt, - next_link, - ) - - threepid_send_requests.labels(type="msisdn", reason="register").observe( - send_attempt - ) - - return 200, ret - - -class RegistrationSubmitTokenServlet(RestServlet): - """Handles registration 3PID validation token submission""" - - PATTERNS = client_patterns( - "/registration/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.auth = hs.get_auth() - self.config = hs.config - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - - if self.config.email.can_verify_email: - self._failure_email_template = ( - self.config.email.email_registration_template_failure_html - ) - - async def on_GET(self, request: Request, medium: str) -> None: - if medium != "email": - raise SynapseError( - 400, "This medium is currently not supported for registration" - ) - if not self.config.email.can_verify_email: - logger.warning( - "User registration via email has been disabled due to lack of email config" - ) - raise SynapseError( - 400, "Email-based registration is disabled on this server" - ) - - sid = parse_string(request, "sid", required=True) - client_secret = parse_string(request, "client_secret", required=True) - assert_valid_client_secret(client_secret) - token = parse_string(request, "token", required=True) - - # Attempt to validate a 3PID session - try: - # Mark the session as valid - next_link = await self.store.validate_threepid_session( - sid, client_secret, token, self.clock.time_msec() - ) - - # Perform a 302 redirect if next_link is set - if next_link: - if next_link.startswith("file:///"): - logger.warning( - "Not redirecting to next_link as it is a local file: address" - ) - else: - request.setResponseCode(302) - request.setHeader("Location", next_link) - finish_request(request) - return None - - # Otherwise show the success template - html = self.config.email.email_registration_template_success_html_content - status_code = 200 - except ThreepidValidationError as e: - status_code = e.code - - # Show a failure page with a reason - template_vars = {"failure_reason": e.msg} - html = self._failure_email_template.render(**template_vars) - - respond_with_html(request, status_code, html) - - class UsernameAvailabilityRestServlet(RestServlet): PATTERNS = client_patterns("/register/available") @@ -420,7 +169,6 @@ class RegisterRestServlet(RestServlet): self.store = hs.get_datastores().main self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() - self.identity_handler = hs.get_identity_handler() self.room_member_handler = hs.get_room_member_handler() self.macaroon_gen = hs.get_macaroon_generator() self.ratelimiter = hs.get_registration_ratelimiter() @@ -605,27 +353,6 @@ class RegisterRestServlet(RestServlet): ) raise - # Check that we're not trying to register a 
denied 3pid. - # - # the user-facing checks will probably already have happened in - # /register/email/requestToken when we requested a 3pid, but that's not - # guaranteed. - if auth_result: - for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: - if login_type in auth_result: - medium = auth_result[login_type]["medium"] - address = auth_result[login_type]["address"] - - if not await check_3pid_allowed( - self.hs, medium, address, registration=True - ): - raise SynapseError( - 403, - "Third party identifiers (email/phone numbers)" - + " are not authorized on this server", - Codes.THREEPID_DENIED, - ) - if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", registered_user_id @@ -640,12 +367,10 @@ class RegisterRestServlet(RestServlet): if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = ( - await ( - self.password_auth_provider.get_username_for_registration( - auth_result, - params, - ) + desired_username = await ( + self.password_auth_provider.get_username_for_registration( + auth_result, + params, ) ) @@ -657,50 +382,13 @@ class RegisterRestServlet(RestServlet): if desired_username is not None: desired_username = desired_username.lower() - threepid = None - if auth_result: - threepid = auth_result.get(LoginType.EMAIL_IDENTITY) - - # Also check that we're not trying to register a 3pid that's already - # been registered. - # - # This has probably happened in /register/email/requestToken as well, - # but if a user hits this endpoint twice then clicks on each link from - # the two activation emails, they would register the same 3pid twice. - for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: - if login_type in auth_result: - medium = auth_result[login_type]["medium"] - address = auth_result[login_type]["address"] - # For emails, canonicalise the address. - # We store all email addresses canonicalised in the DB. - # (See on_POST in EmailThreepidRequestTokenRestServlet - # in synapse/rest/client/account.py) - if medium == "email": - try: - address = canonicalise_email(address) - except ValueError as e: - raise SynapseError(400, str(e)) - - existing_user_id = await self.store.get_user_id_by_threepid( - medium, address - ) - - if existing_user_id is not None: - raise SynapseError( - 400, - "%s is already in use" % medium, - Codes.THREEPID_IN_USE, - ) - entries = await self.store.get_user_agents_ips_to_ui_auth_session( session_id ) - display_name = ( - await ( - self.password_auth_provider.get_displayname_for_registration( - auth_result, params - ) + display_name = await ( + self.password_auth_provider.get_displayname_for_registration( + auth_result, params ) ) @@ -708,18 +396,10 @@ class RegisterRestServlet(RestServlet): localpart=desired_username, password_hash=password_hash, guest_access_token=guest_access_token, - threepid=threepid, default_display_name=display_name, address=client_addr, user_agent_ips=entries, ) - # Necessary due to auth checks prior to the threepid being - # written to the db - if threepid: - if is_threepid_reserved( - self.hs.config.server.mau_limits_reserved_threepids, threepid - ): - await self.store.upsert_monthly_active_user(registered_user_id) # Remember that the user account has been registered (and the user # ID it was registered with, since it might not have been specified). 
@@ -775,9 +455,12 @@ class RegisterRestServlet(RestServlet): body: JsonDict, should_issue_refresh_token: bool = False, ) -> JsonDict: - user_id = await self.registration_handler.appservice_register( + user_id, appservice = await self.registration_handler.appservice_register( username, as_token ) + if appservice.msc4190_device_management: + body["inhibit_login"] = True + return await self._create_registration_details( user_id, body, @@ -909,6 +592,14 @@ class RegisterAppServiceOnlyRestServlet(RestServlet): await self.ratelimiter.ratelimit(None, client_addr, update=False) + # Allow only ASes to use this API. + if body.get("type") != APP_SERVICE_REGISTRATION_TYPE: + raise SynapseError( + 403, + "Registration has been disabled. Only m.login.application_service registrations are allowed.", + errcode=Codes.FORBIDDEN, + ) + kind = parse_string(request, "kind", default="user") if kind == "guest": @@ -924,10 +615,6 @@ class RegisterAppServiceOnlyRestServlet(RestServlet): if not isinstance(desired_username, str) or len(desired_username) > 512: raise SynapseError(400, "Invalid username") - # Allow only ASes to use this API. - if body.get("type") != APP_SERVICE_REGISTRATION_TYPE: - raise SynapseError(403, "Non-application service registration type") - if not self.auth.has_access_token(request): raise SynapseError( 400, @@ -941,7 +628,7 @@ class RegisterAppServiceOnlyRestServlet(RestServlet): as_token = self.auth.get_access_token_from_request(request) - user_id = await self.registration_handler.appservice_register( + user_id, _ = await self.registration_handler.appservice_register( desired_username, as_token ) return 200, {"user_id": user_id} @@ -958,60 +645,11 @@ def _calculate_registration_flows( Returns: a list of supported flows """ - # FIXME: need a better error than "no auth flow found" for scenarios - # where we required 3PID for registration but the user didn't give one - require_email = "email" in config.registration.registrations_require_3pid - require_msisdn = "msisdn" in config.registration.registrations_require_3pid - - show_msisdn = True - show_email = True - - if config.registration.disable_msisdn_registration: - show_msisdn = False - require_msisdn = False - enabled_auth_types = auth_handler.get_enabled_auth_types() - if LoginType.EMAIL_IDENTITY not in enabled_auth_types: - show_email = False - if require_email: - raise ConfigError( - "Configuration requires email address at registration, but email " - "validation is not configured" - ) - - if LoginType.MSISDN not in enabled_auth_types: - show_msisdn = False - if require_msisdn: - raise ConfigError( - "Configuration requires msisdn at registration, but msisdn " - "validation is not configured" - ) - flows = [] - # only support 3PIDless registration if no 3PIDs are required - if not require_email and not require_msisdn: - # Add a dummy step here, otherwise if a client completes - # recaptcha first we'll assume they were going for this flow - # and complete the request, when they could have been trying to - # complete one of the flows with email/msisdn auth. 
- flows.append([LoginType.DUMMY]) - - # only support the email-only flow if we don't require MSISDN 3PIDs - if show_email and not require_msisdn: - flows.append([LoginType.EMAIL_IDENTITY]) - - # only support the MSISDN-only flow if we don't require email 3PIDs - if show_msisdn and not require_email: - flows.append([LoginType.MSISDN]) - - if show_email and show_msisdn: - # always let users provide both MSISDN & email - flows.append([LoginType.MSISDN, LoginType.EMAIL_IDENTITY]) - - # Add a flow that doesn't require any 3pids, if the config requests it. - if config.registration.enable_registration_token_3pid_bypass: - flows.append([LoginType.REGISTRATION_TOKEN]) + # We don't support 3PIDs + flows.append([LoginType.DUMMY]) # Prepend m.login.terms to all flows if we're requiring consent if config.consent.user_consent_at_registration: @@ -1037,10 +675,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: RegisterAppServiceOnlyRestServlet(hs).register(http_server) return - if hs.config.worker.worker_app is None: - EmailRegisterRequestTokenRestServlet(hs).register(http_server) - MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) - RegistrationSubmitTokenServlet(hs).register(http_server) UsernameAvailabilityRestServlet(hs).register(http_server) RegistrationTokenValidityRestServlet(hs).register(http_server) RegisterRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py
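Note on the register.py change above: with the 3PID machinery gone, _calculate_registration_flows collapses to a single m.login.dummy flow, with any consent/captcha stages prepended by the surrounding (unchanged) code. A minimal standalone sketch of the resulting behaviour — the captcha prepending is an assumption carried over from earlier releases, and the boolean flags stand in for the real config objects:

from typing import List

DUMMY = "m.login.dummy"
TERMS = "m.login.terms"
RECAPTCHA = "m.login.recaptcha"


def calculate_flows(require_consent: bool, require_captcha: bool) -> List[List[str]]:
    # Only one base flow remains now that email/msisdn registration is gone.
    flows = [[DUMMY]]
    # Consent (and, assumed, captcha) stages are prepended to every flow.
    if require_consent:
        flows = [[TERMS] + flow for flow in flows]
    if require_captcha:
        flows = [[RECAPTCHA] + flow for flow in flows]
    return flows


# calculate_flows(True, False) == [["m.login.terms", "m.login.dummy"]]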
index 27bf53314a..a1808847f0 100644 --- a/synapse/rest/client/rendezvous.py +++ b/synapse/rest/client/rendezvous.py
@@ -34,51 +34,6 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -# n.b [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) has now been closed. -# However, we want to keep this implementation around for some time. -# TODO: define an end-of-life date for this implementation. -class MSC3886RendezvousServlet(RestServlet): - """ - This is a placeholder implementation of [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) - simple client rendezvous capability that is used by the "Sign in with QR" functionality. - - This implementation only serves as a 307 redirect to a configured server rather than being a full implementation. - - A module that implements the full functionality is available at: https://pypi.org/project/matrix-http-rendezvous-synapse/. - - Request: - - POST /rendezvous HTTP/1.1 - Content-Type: ... - - ... - - Response: - - HTTP/1.1 307 - Location: <configured endpoint> - """ - - PATTERNS = client_patterns( - "/org.matrix.msc3886/rendezvous$", releases=[], v1=False, unstable=True - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - redirection_target: Optional[str] = hs.config.experimental.msc3886_endpoint - assert ( - redirection_target is not None - ), "Servlet is only registered if there is a redirection target" - self.endpoint = redirection_target.encode("utf-8") - - async def on_POST(self, request: SynapseRequest) -> None: - respond_with_redirect( - request, self.endpoint, statusCode=TEMPORARY_REDIRECT, cors=True - ) - - # PUT, GET and DELETE are not implemented as they should be fulfilled by the redirect target. - - class MSC4108DelegationRendezvousServlet(RestServlet): PATTERNS = client_patterns( "/org.matrix.msc4108/rendezvous$", releases=[], v1=False, unstable=True @@ -89,9 +44,9 @@ class MSC4108DelegationRendezvousServlet(RestServlet): redirection_target: Optional[str] = ( hs.config.experimental.msc4108_delegation_endpoint ) - assert ( - redirection_target is not None - ), "Servlet is only registered if there is a delegation target" + assert redirection_target is not None, ( + "Servlet is only registered if there is a delegation target" + ) self.endpoint = redirection_target.encode("utf-8") async def on_POST(self, request: SynapseRequest) -> None: @@ -114,9 +69,6 @@ class MSC4108RendezvousServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.experimental.msc3886_endpoint is not None: - MSC3886RendezvousServlet(hs).register(http_server) - if hs.config.experimental.msc4108_enabled: MSC4108RendezvousServlet(hs).register(http_server) diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py
index 4eee53e5a8..c5037be8b7 100644 --- a/synapse/rest/client/reporting.py +++ b/synapse/rest/client/reporting.py
@@ -23,7 +23,7 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import ( @@ -40,10 +40,6 @@ from ._base import client_patterns if TYPE_CHECKING: from synapse.server import HomeServer -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import StrictStr -else: - from pydantic import StrictStr logger = logging.getLogger(__name__) @@ -109,18 +105,17 @@ class ReportEventRestServlet(RestServlet): class ReportRoomRestServlet(RestServlet): """This endpoint lets clients report a room for abuse. - Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org - for content moderation purposes, and therefore backwards compatibility should be - carefully considered when changing anything on this endpoint. - - More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151 + Introduced by MSC4151: https://github.com/matrix-org/matrix-spec-proposals/pull/4151 """ - PATTERNS = client_patterns( - "/org.matrix.msc4151/rooms/(?P<room_id>[^/]*)/report$", - releases=[], - v1=False, - unstable=True, + # Cast the Iterable to a list so that we can `append` below. + PATTERNS = list( + client_patterns( + "/rooms/(?P<room_id>[^/]*)/report$", + releases=("v3",), + unstable=False, + v1=False, + ) ) def __init__(self, hs: "HomeServer"): @@ -157,6 +152,4 @@ class ReportRoomRestServlet(RestServlet): def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReportEventRestServlet(hs).register(http_server) - - if hs.config.experimental.msc4151_enabled: - ReportRoomRestServlet(hs).register(http_server) + ReportRoomRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
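Note on the reporting.py change above: with MSC4151 stabilised, the room-report endpoint now lives under the stable v3 prefix and is registered unconditionally. A hedged client-side sketch — the homeserver URL, token and room ID are placeholders, and the request body shape follows MSC4151:

import requests


def report_room(base_url: str, access_token: str, room_id: str, reason: str) -> None:
    # POST /_matrix/client/v3/rooms/{roomId}/report, matching the
    # stable pattern registered above.
    resp = requests.post(
        f"{base_url}/_matrix/client/v3/rooms/{room_id}/report",
        headers={"Authorization": f"Bearer {access_token}"},
        json={"reason": reason},
        timeout=10,
    )
    resp.raise_for_status()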
index 903c74f6d8..38230de0de 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -19,7 +19,8 @@ # # -""" This module contains REST servlets to do with rooms: /rooms/<paths> """ +"""This module contains REST servlets to do with rooms: /rooms/<paths>""" + import logging import re from enum import Enum @@ -67,7 +68,8 @@ from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID from synapse.types.state import StateFilter from synapse.util.cancellation import cancellable -from synapse.util.stringutils import parse_and_validate_server_name, random_string +from synapse.util.events import generate_fake_event_id +from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: from synapse.server import HomeServer @@ -193,7 +195,10 @@ class RoomStateEventRestServlet(RestServlet): self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() + self.delayed_events_handler = hs.get_delayed_events_handler() self.auth = hs.get_auth() + self._max_event_delay_ms = hs.config.server.max_event_delay_ms + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker def register(self, http_server: HttpServer) -> None: # /rooms/$roomid/state/$eventtype @@ -285,10 +290,45 @@ class RoomStateEventRestServlet(RestServlet): content = parse_json_object_from_request(request) + is_requester_admin = await self.auth.is_server_admin(requester) + if not is_requester_admin: + spam_check = ( + await self._spam_checker_module_callbacks.user_may_send_state_event( + user_id=requester.user.to_string(), + room_id=room_id, + event_type=event_type, + state_key=state_key, + content=content, + ) + ) + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: + raise SynapseError( + 403, + "You are not permitted to send the state event", + errcode=spam_check[0], + additional_fields=spam_check[1], + ) + origin_server_ts = None if requester.app_service: origin_server_ts = parse_integer(request, "ts") + delay = _parse_request_delay(request, self._max_event_delay_ms) + if delay is not None: + delay_id = await self.delayed_events_handler.add( + requester, + room_id=room_id, + event_type=event_type, + state_key=state_key, + origin_server_ts=origin_server_ts, + content=content, + delay=delay, + ) + + set_tag("delay_id", delay_id) + ret = {"delay_id": delay_id} + return 200, ret + try: if event_type == EventTypes.Member: membership = content.get("membership", None) @@ -325,7 +365,7 @@ class RoomStateEventRestServlet(RestServlet): ) event_id = event.event_id except ShadowBanError: - event_id = "$" + random_string(43) + event_id = generate_fake_event_id() set_tag("event_id", event_id) ret = {"event_id": event_id} @@ -339,7 +379,9 @@ class RoomSendEventRestServlet(TransactionRestServlet): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() + self.delayed_events_handler = hs.get_delayed_events_handler() self.auth = hs.get_auth() + self._max_event_delay_ms = hs.config.server.max_event_delay_ms def register(self, http_server: HttpServer) -> None: # 
/rooms/$roomid/send/$event_type[/$txn_id] @@ -356,6 +398,26 @@ class RoomSendEventRestServlet(TransactionRestServlet): ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) + origin_server_ts = None + if requester.app_service: + origin_server_ts = parse_integer(request, "ts") + + delay = _parse_request_delay(request, self._max_event_delay_ms) + if delay is not None: + delay_id = await self.delayed_events_handler.add( + requester, + room_id=room_id, + event_type=event_type, + state_key=None, + origin_server_ts=origin_server_ts, + content=content, + delay=delay, + ) + + set_tag("delay_id", delay_id) + ret = {"delay_id": delay_id} + return 200, ret + event_dict: JsonDict = { "type": event_type, "content": content, @@ -363,10 +425,8 @@ class RoomSendEventRestServlet(TransactionRestServlet): "sender": requester.user.to_string(), } - if requester.app_service: - origin_server_ts = parse_integer(request, "ts") - if origin_server_ts is not None: - event_dict["origin_server_ts"] = origin_server_ts + if origin_server_ts is not None: + event_dict["origin_server_ts"] = origin_server_ts try: ( @@ -377,7 +437,7 @@ class RoomSendEventRestServlet(TransactionRestServlet): ) event_id = event.event_id except ShadowBanError: - event_id = "$" + random_string(43) + event_id = generate_fake_event_id() set_tag("event_id", event_id) return 200, {"event_id": event_id} @@ -409,6 +469,49 @@ class RoomSendEventRestServlet(TransactionRestServlet): ) +def _parse_request_delay( + request: SynapseRequest, + max_delay: Optional[int], +) -> Optional[int]: + """Parses from the request string the delay parameter for + delayed event requests, and checks it for correctness. + + Args: + request: the twisted HTTP request. + max_delay: the maximum allowed value of the delay parameter, + or None if no delay parameter is allowed. + Returns: + The value of the requested delay, or None if it was absent. + + Raises: + SynapseError: if the delay parameter is present and forbidden, + or if it exceeds the maximum allowed value. 
+ """ + delay = parse_integer(request, "org.matrix.msc4140.delay") + if delay is None: + return None + if max_delay is None: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Delayed events are not supported on this server", + Codes.UNKNOWN, + { + "org.matrix.msc4140.errcode": "M_MAX_DELAY_UNSUPPORTED", + }, + ) + if delay > max_delay: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The requested delay exceeds the allowed maximum.", + Codes.UNKNOWN, + { + "org.matrix.msc4140.errcode": "M_MAX_DELAY_EXCEEDED", + "org.matrix.msc4140.max_delay": max_delay, + }, + ) + return delay + + # TODO: Needs unit testing for room ID + alias joins class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): CATEGORY = "Event sending requests" @@ -417,7 +520,6 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): super().__init__(hs) super(ResolveRoomIdMixin, self).__init__(hs) # ensure the Mixin is set up self.auth = hs.get_auth() - self._support_via = hs.config.experimental.msc4156_enabled def register(self, http_server: HttpServer) -> None: # /join/$room_identifier[/$txn_id] @@ -435,13 +537,11 @@ class JoinRoomAliasServlet(ResolveRoomIdMixin, TransactionRestServlet): # twisted.web.server.Request.args is incorrectly defined as Optional[Any] args: Dict[bytes, List[bytes]] = request.args # type: ignore - remote_room_hosts = parse_strings_from_args(args, "server_name", required=False) - if self._support_via: + # Prefer via over server_name (deprecated with MSC4156) + remote_room_hosts = parse_strings_from_args(args, "via", required=False) + if remote_room_hosts is None: remote_room_hosts = parse_strings_from_args( - args, - "org.matrix.msc4156.via", - default=remote_room_hosts, - required=False, + args, "server_name", required=False ) room_id, remote_room_hosts = await self.resolve_room_id( room_identifier, @@ -703,9 +803,9 @@ class RoomMessageListRestServlet(RestServlet): # decorator on `get_number_joined_users_in_room` doesn't play well with # the type system. Maybe in the future, it can use some ParamSpec # wizardry to fix it up. - room_member_count_deferred = run_in_background( # type: ignore[call-arg] + room_member_count_deferred = run_in_background( # type: ignore[call-overload] self.store.get_number_joined_users_in_room, - room_id, # type: ignore[arg-type] + room_id, ) requester = await self.auth.get_user_by_req(request, allow_guest=True) @@ -814,12 +914,10 @@ class RoomEventServlet(RestServlet): requester = await self.auth.get_user_by_req(request, allow_guest=True) include_unredacted_content = self.msc2815_enabled and ( - parse_string( + parse_boolean( request, - "fi.mau.msc2815.include_unredacted_content", - allowed_values=("true", "false"), + "fi.mau.msc2815.include_unredacted_content" ) - == "true" ) if include_unredacted_content and not await self.auth.is_server_admin( requester @@ -1193,7 +1291,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): event_id = event.event_id except ShadowBanError: - event_id = "$" + random_string(43) + event_id = generate_fake_event_id() set_tag("event_id", event_id) return 200, {"event_id": event_id} diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 8c5db2a513..bac02122d0 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py
@@ -21,12 +21,13 @@ import itertools import logging from collections import defaultdict -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState from synapse.api.errors import Codes, StoreError, SynapseError from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState +from synapse.api.ratelimiting import Ratelimiter from synapse.events.utils import ( SerializeEventConfig, format_event_for_client_v2_without_room_id, @@ -126,6 +127,13 @@ class SyncRestServlet(RestServlet): cache_name="sync_valid_filter", ) + # Ratelimiter for presence updates, keyed by requester. + self._presence_per_user_limiter = Ratelimiter( + store=self.store, + clock=self.clock, + cfg=hs.config.ratelimiting.rc_presence_per_user, + ) + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # This will always be set by the time Twisted calls us. assert request.args is not None @@ -152,6 +160,14 @@ class SyncRestServlet(RestServlet): filter_id = parse_string(request, "filter") full_state = parse_boolean(request, "full_state", default=False) + use_state_after = False + if await self.store.is_feature_enabled( + user.to_string(), ExperimentalFeature.MSC4222 + ): + use_state_after = parse_boolean( + request, "org.matrix.msc4222.use_state_after", default=False + ) + logger.debug( "/sync: user=%r, timeout=%r, since=%r, " "set_presence=%r, filter_id=%r, device_id=%r", @@ -184,6 +200,7 @@ class SyncRestServlet(RestServlet): full_state, device_id, last_ignore_accdata_streampos, + use_state_after, ) if filter_id is None: @@ -220,6 +237,7 @@ class SyncRestServlet(RestServlet): filter_collection=filter_collection, is_guest=requester.is_guest, device_id=device_id, + use_state_after=use_state_after, ) since_token = None @@ -229,7 +247,13 @@ class SyncRestServlet(RestServlet): # send any outstanding server notices to the user. await self._server_notices_sender.on_user_syncing(user.to_string()) - affect_presence = set_presence != PresenceState.OFFLINE + # Ignore the presence update if the ratelimit is exceeded, but do not pause the request. + allowed, _ = await self._presence_per_user_limiter.can_do_action(requester) + if not allowed: + affect_presence = False + logger.debug("User set_presence ratelimit exceeded; ignoring it.") + else: + affect_presence = set_presence != PresenceState.OFFLINE context = await self.presence_handler.user_syncing( user.to_string(), @@ -258,7 +282,7 @@ class SyncRestServlet(RestServlet): # We know that the requester has an access token since appservices # cannot use sync.
response_content = await self.encode_response( - time_now, sync_result, requester, filter_collection + time_now, sync_config, sync_result, requester, filter_collection ) logger.debug("Event formatting complete") @@ -268,6 +292,7 @@ class SyncRestServlet(RestServlet): async def encode_response( self, time_now: int, + sync_config: SyncConfig, sync_result: SyncResult, requester: Requester, filter: FilterCollection, @@ -292,7 +317,7 @@ class SyncRestServlet(RestServlet): ) joined = await self.encode_joined( - sync_result.joined, time_now, serialize_options + sync_config, sync_result.joined, time_now, serialize_options ) invited = await self.encode_invited( @@ -304,7 +329,7 @@ class SyncRestServlet(RestServlet): ) archived = await self.encode_archived( - sync_result.archived, time_now, serialize_options + sync_config, sync_result.archived, time_now, serialize_options ) logger.debug("building sync response dict") @@ -372,6 +397,7 @@ class SyncRestServlet(RestServlet): @trace_with_opname("sync.encode_joined") async def encode_joined( self, + sync_config: SyncConfig, rooms: List[JoinedSyncResult], time_now: int, serialize_options: SerializeEventConfig, @@ -380,6 +406,7 @@ class SyncRestServlet(RestServlet): Encode the joined rooms in a sync result Args: + sync_config rooms: list of sync results for rooms this user is joined to time_now: current time - used as a baseline for age calculations serialize_options: Event serializer options @@ -389,7 +416,11 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = await self.encode_room( - room, time_now, joined=True, serialize_options=serialize_options + sync_config, + room, + time_now, + joined=True, + serialize_options=serialize_options, ) return joined @@ -419,7 +450,12 @@ class SyncRestServlet(RestServlet): ) unsigned = dict(invite.get("unsigned", {})) invite["unsigned"] = unsigned - invited_state = list(unsigned.pop("invite_room_state", [])) + + invited_state = unsigned.pop("invite_room_state", []) + if not isinstance(invited_state, list): + invited_state = [] + + invited_state = list(invited_state) invited_state.append(invite) invited[room.room_id] = {"invite_state": {"events": invited_state}} @@ -459,7 +495,10 @@ class SyncRestServlet(RestServlet): # Extract the stripped room state from the unsigned dict # This is for clients to get a little bit of information about # the room they've knocked on, without revealing any sensitive information - knocked_state = list(unsigned.pop("knock_room_state", [])) + knocked_state = unsigned.pop("knock_room_state", []) + if not isinstance(knocked_state, list): + knocked_state = [] + knocked_state = list(knocked_state) # Append the actual knock membership event itself as well. 
This provides # the client with: @@ -477,6 +516,7 @@ class SyncRestServlet(RestServlet): @trace_with_opname("sync.encode_archived") async def encode_archived( self, + sync_config: SyncConfig, rooms: List[ArchivedSyncResult], time_now: int, serialize_options: SerializeEventConfig, @@ -485,6 +525,7 @@ class SyncRestServlet(RestServlet): Encode the archived rooms in a sync result Args: + sync_config rooms: list of sync results for rooms this user is joined to time_now: current time - used as a baseline for age calculations serialize_options: Event serializer options @@ -494,13 +535,18 @@ class SyncRestServlet(RestServlet): joined = {} for room in rooms: joined[room.room_id] = await self.encode_room( - room, time_now, joined=False, serialize_options=serialize_options + sync_config, + room, + time_now, + joined=False, + serialize_options=serialize_options, ) return joined async def encode_room( self, + sync_config: SyncConfig, room: Union[JoinedSyncResult, ArchivedSyncResult], time_now: int, joined: bool, @@ -508,6 +554,7 @@ class SyncRestServlet(RestServlet): ) -> JsonDict: """ Args: + sync_config room: sync result for a single room time_now: current time - used as a baseline for age calculations token_id: ID of the user's auth token - used for namespacing @@ -548,13 +595,20 @@ class SyncRestServlet(RestServlet): account_data = room.account_data + # We either include a `state` or `state_after` field depending on + # whether the client has opted in to the newer `state_after` behavior. + if sync_config.use_state_after: + state_key_name = "org.matrix.msc4222.state_after" + else: + state_key_name = "state" + result: JsonDict = { "timeline": { "events": serialized_timeline, "prev_batch": await room.timeline.prev_batch.to_string(self.store), "limited": room.timeline.limited, }, - "state": {"events": serialized_state}, + state_key_name: {"events": serialized_state}, "account_data": {"events": account_data}, } @@ -688,6 +742,7 @@ class SlidingSyncE2eeRestServlet(RestServlet): filter_collection=self.only_member_events_filter_collection, is_guest=requester.is_guest, device_id=device_id, + use_state_after=False, # We don't return any rooms so this flag is a no-op ) since_token = None @@ -975,7 +1030,7 @@ class SlidingSyncRestServlet(RestServlet): return response def encode_lists( - self, lists: Dict[str, SlidingSyncResult.SlidingWindowList] + self, lists: Mapping[str, SlidingSyncResult.SlidingWindowList] ) -> JsonDict: def encode_operation( operation: SlidingSyncResult.SlidingWindowList.Operation, @@ -1010,13 +1065,19 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms: Dict[str, JsonDict] = {} for room_id, room_result in rooms.items(): serialized_rooms[room_id] = { - "bump_stamp": room_result.bump_stamp, - "joined_count": room_result.joined_count, - "invited_count": room_result.invited_count, "notification_count": room_result.notification_count, "highlight_count": room_result.highlight_count, } + if room_result.bump_stamp is not None: + serialized_rooms[room_id]["bump_stamp"] = room_result.bump_stamp + + if room_result.joined_count is not None: + serialized_rooms[room_id]["joined_count"] = room_result.joined_count + + if room_result.invited_count is not None: + serialized_rooms[room_id]["invited_count"] = room_result.invited_count + if room_result.name: serialized_rooms[room_id]["name"] = room_result.name @@ -1040,10 +1101,15 @@ class SlidingSyncRestServlet(RestServlet): serialized_rooms[room_id]["heroes"] = serialized_heroes # We should only include the `initial` key if it's `True` to save 
bandwidth. - # The absense of this flag means `False`. + # The absence of this flag means `False`. if room_result.initial: serialized_rooms[room_id]["initial"] = room_result.initial + if room_result.unstable_expanded_timeline: + serialized_rooms[room_id]["unstable_expanded_timeline"] = ( + room_result.unstable_expanded_timeline + ) + # This will be omitted for invite/knock rooms with `stripped_state` if ( room_result.required_state is not None @@ -1077,9 +1143,9 @@ class SlidingSyncRestServlet(RestServlet): # This will be omitted for invite/knock rooms with `stripped_state` if room_result.prev_batch is not None: - serialized_rooms[room_id]["prev_batch"] = ( - await room_result.prev_batch.to_string(self.store) - ) + serialized_rooms[room_id][ + "prev_batch" + ] = await room_result.prev_batch.to_string(self.store) # This will be omitted for invite/knock rooms with `stripped_state` if room_result.num_live is not None: diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py
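Note on the sync.py changes above: a client that opts in with ?org.matrix.msc4222.use_state_after=true (and has the experimental feature enabled) receives each room's state under org.matrix.msc4222.state_after instead of state. A minimal consumer-side sketch using the key names from encode_room():

from typing import Any, Dict, List


def room_state_events(room: Dict[str, Any], use_state_after: bool) -> List[Dict[str, Any]]:
    key = "org.matrix.msc4222.state_after" if use_state_after else "state"
    return room.get(key, {}).get("events", [])

The new per-user presence ratelimiter, configured via rc_presence_per_user, simply drops the set_presence update when exceeded rather than delaying the request.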
index 554bcb95dd..b6648f3499 100644 --- a/synapse/rest/client/tags.py +++ b/synapse/rest/client/tags.py
@@ -78,6 +78,7 @@ class TagServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.handler = hs.get_account_data_handler() + self.room_member_handler = hs.get_room_member_handler() async def on_PUT( self, request: SynapseRequest, user_id: str, room_id: str, tag: str @@ -85,6 +86,12 @@ class TagServlet(RestServlet): requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add tags for other users.") + # Check if the user has any membership in the room and raise error if not. + # Although it's not harmful for users to tag random rooms, it's just superfluous + # data we don't need to track or allow. + await self.room_member_handler.check_for_any_membership_in_room( + user_id=user_id, room_id=room_id + ) body = parse_json_object_from_request(request) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py
index 30c1f17fc6..1a57996aec 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py
@@ -21,6 +21,7 @@ """This module contains logic for storing HTTP PUT transactions. This is used to ensure idempotency when performing PUTs using the REST API.""" + import logging from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple @@ -93,9 +94,9 @@ class HttpTransactionCache: # (appservice and guest users), but does not cover access tokens minted # by the admin API. Use the access token ID instead. else: - assert ( - requester.access_token_id is not None - ), "Requester must have an access_token_id" + assert requester.access_token_id is not None, ( + "Requester must have an access_token_id" + ) return (path, "user_admin", requester.access_token_id) def fetch_or_execute_request( diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 75df684416..f58f11e5cc 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py
@@ -4,7 +4,7 @@ # Copyright 2019 The Matrix.org Foundation C.I.C. # Copyright 2017 Vector Creations Ltd # Copyright 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -64,6 +64,7 @@ class VersionsRestServlet(RestServlet): async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: msc3881_enabled = self.config.experimental.msc3881_enabled + msc3575_enabled = self.config.experimental.msc3575_enabled if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req( @@ -77,6 +78,9 @@ class VersionsRestServlet(RestServlet): msc3881_enabled = await self.store.is_feature_enabled( user_id, ExperimentalFeature.MSC3881 ) + msc3575_enabled = await self.store.is_feature_enabled( + user_id, ExperimentalFeature.MSC3575 + ) return ( 200, @@ -145,9 +149,6 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc3881": msc3881_enabled, # Adds support for filtering /messages by event relation. "org.matrix.msc3874": self.config.experimental.msc3874_enabled, - # Adds support for simple HTTP rendezvous as per MSC3886 - "org.matrix.msc3886": self.config.experimental.msc3886_endpoint - is not None, # Adds support for relation-based redactions as per MSC3912. "org.matrix.msc3912": self.config.experimental.msc3912_enabled, # Whether recursively provide relations is supported. @@ -167,8 +168,14 @@ class VersionsRestServlet(RestServlet): is not None ) ), - # MSC4151: Report room API (Client-Server API) - "org.matrix.msc4151": self.config.experimental.msc4151_enabled, + # MSC4140: Delayed events + "org.matrix.msc4140": bool(self.config.server.max_event_delay_ms), + # Simplified sliding sync + "org.matrix.simplified_msc3575": msc3575_enabled, + # Arbitrary key-value profile fields. + "uk.tcpip.msc4133": self.config.experimental.msc4133_enabled, + # MSC4155: Invite filtering + "org.matrix.msc4155": self.config.experimental.msc4155_enabled, }, }, ) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py
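Note on the versions.py changes above: several unstable_features flags are now resolved per user (MSC3881, MSC3575), so feature detection is best done with an access token. A hedged sketch; base_url and token are placeholders:

import requests


def server_supports(base_url: str, token: str, flag: str) -> bool:
    resp = requests.get(
        f"{base_url}/_matrix/client/versions",
        headers={"Authorization": f"Bearer {token}"},
        timeout=10,
    )
    resp.raise_for_status()
    return bool(resp.json().get("unstable_features", {}).get(flag, False))


# e.g. server_supports(hs, token, "org.matrix.msc4140") probes for delayed events.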
index a411ed614e..fea0b9706d 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py
@@ -23,17 +23,11 @@ import logging import re from typing import TYPE_CHECKING, Dict, Mapping, Optional, Set, Tuple -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra, StrictInt, StrictStr -else: - from pydantic import StrictInt, StrictStr, Extra - from signedjson.sign import sign_json from twisted.web.server import Request +from synapse._pydantic_compat import Extra, StrictInt, StrictStr from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import HttpServer from synapse.http.servlet import ( @@ -191,10 +185,10 @@ class RemoteKey(RestServlet): server_keys: Dict[Tuple[str, str], Optional[FetchKeyResultForRemote]] = {} for server_name, key_ids in query.items(): if key_ids: - results: Mapping[str, Optional[FetchKeyResultForRemote]] = ( - await self.store.get_server_keys_json_for_remote( - server_name, key_ids - ) + results: Mapping[ + str, Optional[FetchKeyResultForRemote] + ] = await self.store.get_server_keys_json_for_remote( + server_name, key_ids ) else: results = await self.store.get_all_server_keys_json_for_remote( diff --git a/synapse/rest/media/config_resource.py b/synapse/rest/media/config_resource.py
index 80462d65d3..b014e91bdb 100644 --- a/synapse/rest/media/config_resource.py +++ b/synapse/rest/media/config_resource.py
@@ -40,7 +40,14 @@ class MediaConfigResource(RestServlet): self.clock = hs.get_clock() self.auth = hs.get_auth() self.limits_dict = {"m.upload.size": config.media.max_upload_size} + self.media_repository_callbacks = hs.get_module_api_callbacks().media_repository async def on_GET(self, request: SynapseRequest) -> None: - await self.auth.get_user_by_req(request) - respond_with_json(request, 200, self.limits_dict, send_cors=True) + requester = await self.auth.get_user_by_req(request) + user_specific_config = ( + await self.media_repository_callbacks.get_media_config_for_user( + requester.user.to_string() + ) + ) + response = user_specific_config if user_specific_config else self.limits_dict + respond_with_json(request, 200, response, send_cors=True) diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py
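Note on the config_resource.py change above: a module can now tailor the media /config response per user, with None falling through to the server-wide limits_dict. A sketch of a module wiring this up — the registration method name follows the media_repository_callbacks module documentation added in this release and should be checked against it, and the quota rule is purely illustrative:

from typing import Optional

from synapse.module_api import ModuleApi


class MediaQuotaModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        api.register_media_repository_callbacks(
            get_media_config_for_user=self.get_media_config_for_user,
        )

    async def get_media_config_for_user(self, user_id: str) -> Optional[dict]:
        # Illustrative rule: cap uploads for users of a hypothetical subdomain.
        if user_id.endswith(":guests.example.com"):
            return {"m.upload.size": 10 * 1024 * 1024}
        return None  # fall back to the server-wide config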
index 5ef6bf8836..572f7897fd 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py
@@ -50,9 +50,12 @@ class BaseUploadServlet(RestServlet): self.server_name = hs.hostname self.auth = hs.get_auth() self.max_upload_size = hs.config.media.max_upload_size + self._media_repository_callbacks = ( + hs.get_module_api_callbacks().media_repository + ) - def _get_file_metadata( - self, request: SynapseRequest + async def _get_file_metadata( + self, request: SynapseRequest, user_id: str ) -> Tuple[int, Optional[str], str]: raw_content_length = request.getHeader("Content-Length") if raw_content_length is None: @@ -67,7 +70,14 @@ class BaseUploadServlet(RestServlet): code=413, errcode=Codes.TOO_LARGE, ) - + if not await self._media_repository_callbacks.is_user_allowed_to_upload_media_of_size( + user_id, content_length + ): + raise SynapseError( + msg="Upload request body is too large", + code=413, + errcode=Codes.TOO_LARGE, + ) args: Dict[bytes, List[bytes]] = request.args # type: ignore upload_name_bytes = parse_bytes_from_args(args, "filename") if upload_name_bytes: @@ -94,7 +104,7 @@ class BaseUploadServlet(RestServlet): # if headers.hasHeader(b"Content-Disposition"): # disposition = headers.getRawHeaders(b"Content-Disposition")[0] - # TODO(markjh): parse content-dispostion + # TODO(markjh): parse content-disposition return content_length, upload_name, media_type @@ -104,7 +114,9 @@ class UploadServlet(BaseUploadServlet): async def on_POST(self, request: SynapseRequest) -> None: requester = await self.auth.get_user_by_req(request) - content_length, upload_name, media_type = self._get_file_metadata(request) + content_length, upload_name, media_type = await self._get_file_metadata( + request, requester.user.to_string() + ) try: content: IO = request.content # type: ignore @@ -152,7 +164,9 @@ class AsyncUploadServlet(BaseUploadServlet): async with lock: await self.media_repo.verify_can_upload(media_id, requester.user) - content_length, upload_name, media_type = self._get_file_metadata(request) + content_length, upload_name, media_type = await self._get_file_metadata( + request, requester.user.to_string() + ) try: content: IO = request.content # type: ignore diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py
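Note on the upload_resource.py change above: _get_file_metadata now also awaits the is_user_allowed_to_upload_media_of_size callback, and a False answer yields the same 413 M_TOO_LARGE error as the global cap. A sketch of an implementation a module might register alongside the previous callback; the budget rule is illustrative:

async def is_user_allowed_to_upload_media_of_size(user_id: str, size: int) -> bool:
    # Illustrative: anyone may upload up to 5 MiB; larger uploads are
    # reserved for a hypothetical trusted bot account.
    return size <= 5 * 1024 * 1024 or user_id == "@media-bot:example.com"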
index 7b5bfc0421..982b6c0e7e 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py
@@ -29,7 +29,6 @@ from synapse.rest.synapse.client.pick_idp import PickIdpResource from synapse.rest.synapse.client.pick_username import pick_username_resource from synapse.rest.synapse.client.rendezvous import MSC4108RendezvousSessionResource from synapse.rest.synapse.client.sso_register import SsoRegisterResource -from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource if TYPE_CHECKING: from synapse.server import HomeServer @@ -50,9 +49,7 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc "/_synapse/client/pick_idp": PickIdpResource(hs), "/_synapse/client/pick_username": pick_username_resource(hs), "/_synapse/client/new_user_consent": NewUserConsentResource(hs), - "/_synapse/client/sso_register": SsoRegisterResource(hs), - # Unsubscribe to notification emails link - "/_synapse/client/unsubscribe": UnsubscribeResource(hs), + "/_synapse/client/sso_register": SsoRegisterResource(hs) } # Expose the JWKS endpoint if OAuth2 delegation is enabled @@ -68,16 +65,6 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc resources["/_synapse/client/oidc"] = OIDCResource(hs) - if hs.config.saml2.saml2_enabled: - from synapse.rest.synapse.client.saml2 import SAML2Resource - - res = SAML2Resource(hs) - resources["/_synapse/client/saml2"] = res - - # This is also mounted under '/_matrix' for backwards-compatibility. - # To be removed in Synapse v1.32.0. - resources["/_matrix/saml2"] = res - if hs.config.federation.federation_whitelist_endpoint_enabled: resources[FederationWhitelistResource.PATH] = FederationWhitelistResource(hs) diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py deleted file mode 100644
index 29e4b2d07a..0000000000 --- a/synapse/rest/synapse/client/password_reset.py +++ /dev/null
@@ -1,129 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2020 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# -import logging -from typing import TYPE_CHECKING, Tuple - -from twisted.web.server import Request - -from synapse.api.errors import ThreepidValidationError -from synapse.http.server import DirectServeHtmlResource -from synapse.http.servlet import parse_string -from synapse.util.stringutils import assert_valid_client_secret - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class PasswordResetSubmitTokenResource(DirectServeHtmlResource): - """Handles 3PID validation token submission - - This resource gets mounted under /_synapse/client/password_reset/email/submit_token - """ - - isLeaf = 1 - - def __init__(self, hs: "HomeServer"): - """ - Args: - hs: server - """ - super().__init__() - - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - - self._confirmation_email_template = ( - hs.config.email.email_password_reset_template_confirmation_html - ) - self._email_password_reset_template_success_html = ( - hs.config.email.email_password_reset_template_success_html_content - ) - self._failure_email_template = ( - hs.config.email.email_password_reset_template_failure_html - ) - - # This resource should only be mounted if email validation is enabled - assert hs.config.email.can_verify_email - - async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: - sid = parse_string(request, "sid", required=True) - token = parse_string(request, "token", required=True) - client_secret = parse_string(request, "client_secret", required=True) - assert_valid_client_secret(client_secret) - - # Show a confirmation page, just in case someone accidentally clicked this link when - # they didn't mean to - template_vars = { - "sid": sid, - "token": token, - "client_secret": client_secret, - } - return ( - 200, - self._confirmation_email_template.render(**template_vars).encode("utf-8"), - ) - - async def _async_render_POST(self, request: Request) -> Tuple[int, bytes]: - sid = parse_string(request, "sid", required=True) - token = parse_string(request, "token", required=True) - client_secret = parse_string(request, "client_secret", required=True) - - # Attempt to validate a 3PID session - try: - # Mark the session as valid - next_link = await self.store.validate_threepid_session( - sid, client_secret, token, self.clock.time_msec() - ) - - # Perform a 302 redirect if next_link is set - if next_link: - if next_link.startswith("file:///"): - logger.warning( - "Not redirecting to next_link as it is a local file: address" - ) - else: - next_link_bytes = next_link.encode("utf-8") - request.setHeader("Location", next_link_bytes) - return ( - 302, - ( - b'You are being redirected to <a href="%s">%s</a>.' 
- % (next_link_bytes, next_link_bytes) - ), - ) - - # Otherwise show the success template - html_bytes = self._email_password_reset_template_success_html.encode( - "utf-8" - ) - status_code = 200 - except ThreepidValidationError as e: - status_code = e.code - - # Show a failure page with a reason - template_vars = {"failure_reason": e.msg} - html_bytes = self._failure_email_template.render(**template_vars).encode( - "utf-8" - ) - - return status_code, html_bytes diff --git a/synapse/rest/synapse/client/pick_idp.py b/synapse/rest/synapse/client/pick_idp.py
index f26929bd60..5e599f85b0 100644 --- a/synapse/rest/synapse/client/pick_idp.py +++ b/synapse/rest/synapse/client/pick_idp.py
@@ -21,6 +21,7 @@ import logging from typing import TYPE_CHECKING +from synapse.api.urls import LoginSSORedirectURIBuilder from synapse.http.server import ( DirectServeHtmlResource, finish_request, @@ -49,6 +50,8 @@ class PickIdpResource(DirectServeHtmlResource): hs.config.sso.sso_login_idp_picker_template ) self._server_name = hs.hostname + self._public_baseurl = hs.config.server.public_baseurl + self._login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config) async def _async_render_GET(self, request: SynapseRequest) -> None: client_redirect_url = parse_string( @@ -56,25 +59,23 @@ class PickIdpResource(DirectServeHtmlResource): ) idp = parse_string(request, "idp", required=False) - # if we need to pick an IdP, do so + # If we need to pick an IdP, do so if not idp: return await self._serve_id_picker(request, client_redirect_url) - # otherwise, redirect to the IdP's redirect URI - providers = self._sso_handler.get_identity_providers() - auth_provider = providers.get(idp) - if not auth_provider: - logger.info("Unknown idp %r", idp) - self._sso_handler.render_error( - request, "unknown_idp", "Unknown identity provider ID" + # Otherwise, redirect to the login SSO redirect endpoint for the given IdP + # (which will in turn take us to the IdP's redirect URI). + # + # We could go directly to the IdP's redirect URI, but this way we ensure that + # the user goes through the same logic as the normal flow. Additionally, if a proxy + # needs to intercept the request, it only needs to intercept the one endpoint. + sso_login_redirect_url = ( + self._login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id=idp, client_redirect_url=client_redirect_url ) - return - - sso_url = await auth_provider.handle_redirect_request( - request, client_redirect_url.encode("utf8") ) - logger.info("Redirecting to %s", sso_url) - request.redirect(sso_url) + logger.info("Redirecting to %s", sso_login_redirect_url) + request.redirect(sso_login_redirect_url) finish_request(request) async def _serve_id_picker( diff --git a/synapse/rest/synapse/client/saml2/__init__.py b/synapse/rest/synapse/client/saml2/__init__.py deleted file mode 100644
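Note on the pick_idp.py change above: the picker now funnels through the ordinary login SSO redirect endpoint rather than calling the IdP handler directly. A hedged sketch of the URI that LoginSSORedirectURIBuilder presumably produces — the exact path is an assumption based on the Matrix client-server login API, and public_baseurl is assumed to end with a slash, as Synapse normalises it:

from urllib.parse import quote, urlencode


def build_login_sso_redirect_uri(
    public_baseurl: str, idp_id: str, client_redirect_url: str
) -> str:
    # Assumed shape: {public_baseurl}_matrix/client/v3/login/sso/redirect/{idp}?redirectUrl=...
    query = urlencode({"redirectUrl": client_redirect_url})
    return f"{public_baseurl}_matrix/client/v3/login/sso/redirect/{quote(idp_id)}?{query}"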
index 3658c6a0e3..0000000000 --- a/synapse/rest/synapse/client/saml2/__init__.py +++ /dev/null
@@ -1,42 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import logging -from typing import TYPE_CHECKING - -from twisted.web.resource import Resource - -from synapse.rest.synapse.client.saml2.metadata_resource import SAML2MetadataResource -from synapse.rest.synapse.client.saml2.response_resource import SAML2ResponseResource - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class SAML2Resource(Resource): - def __init__(self, hs: "HomeServer"): - Resource.__init__(self) - self.putChild(b"metadata.xml", SAML2MetadataResource(hs)) - self.putChild(b"authn_response", SAML2ResponseResource(hs)) - - -__all__ = ["SAML2Resource"] diff --git a/synapse/rest/synapse/client/saml2/metadata_resource.py b/synapse/rest/synapse/client/saml2/metadata_resource.py deleted file mode 100644
index bcd5195108..0000000000 --- a/synapse/rest/synapse/client/saml2/metadata_resource.py +++ /dev/null
@@ -1,46 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import TYPE_CHECKING - -import saml2.metadata - -from twisted.web.resource import Resource -from twisted.web.server import Request - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class SAML2MetadataResource(Resource): - """A Twisted web resource which renders the SAML metadata""" - - isLeaf = 1 - - def __init__(self, hs: "HomeServer"): - Resource.__init__(self) - self.sp_config = hs.config.saml2.saml2_sp_config - - def render_GET(self, request: Request) -> bytes: - metadata_xml = saml2.metadata.create_metadata_string( - configfile=None, config=self.sp_config - ) - request.setHeader(b"Content-Type", b"text/xml; charset=utf-8") - return metadata_xml diff --git a/synapse/rest/synapse/client/saml2/response_resource.py b/synapse/rest/synapse/client/saml2/response_resource.py deleted file mode 100644
index 7b8667e04c..0000000000 --- a/synapse/rest/synapse/client/saml2/response_resource.py +++ /dev/null
@@ -1,52 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import TYPE_CHECKING - -from twisted.web.server import Request - -from synapse.http.server import DirectServeHtmlResource -from synapse.http.site import SynapseRequest - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class SAML2ResponseResource(DirectServeHtmlResource): - """A Twisted web resource which handles the SAML response""" - - isLeaf = 1 - - def __init__(self, hs: "HomeServer"): - super().__init__() - self._saml_handler = hs.get_saml_handler() - self._sso_handler = hs.get_sso_handler() - - async def _async_render_GET(self, request: Request) -> None: - # We're not expecting any GET request on that resource if everything goes right, - # but some IdPs sometimes end up responding with a 302 redirect on this endpoint. - # In this case, just tell the user that something went wrong and they should - # try to authenticate again. - self._sso_handler.render_error( - request, "unexpected_get", "Unexpected GET request on /saml2/authn_response" - ) - - async def _async_render_POST(self, request: SynapseRequest) -> None: - await self._saml_handler.handle_saml_response(request) diff --git a/synapse/rest/synapse/client/unsubscribe.py b/synapse/rest/synapse/client/unsubscribe.py deleted file mode 100644
index 6d4bd9f2ed..0000000000 --- a/synapse/rest/synapse/client/unsubscribe.py +++ /dev/null
@@ -1,88 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2022 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import TYPE_CHECKING - -from synapse.api.errors import StoreError -from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes -from synapse.http.servlet import parse_string -from synapse.http.site import SynapseRequest - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class UnsubscribeResource(DirectServeHtmlResource): - """ - To allow pusher to be delete by clicking a link (ie. GET request) - """ - - SUCCESS_HTML = b"<html><body>You have been unsubscribed</body><html>" - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.notifier = hs.get_notifier() - self.auth = hs.get_auth() - self.pusher_pool = hs.get_pusherpool() - self.macaroon_generator = hs.get_macaroon_generator() - - async def _async_render_GET(self, request: SynapseRequest) -> None: - """ - Handle a user opening an unsubscribe link in the browser, either via an - HTML/Text email or via the List-Unsubscribe header. - """ - token = parse_string(request, "access_token", required=True) - app_id = parse_string(request, "app_id", required=True) - pushkey = parse_string(request, "pushkey", required=True) - - user_id = self.macaroon_generator.verify_delete_pusher_token( - token, app_id, pushkey - ) - - try: - await self.pusher_pool.remove_pusher( - app_id=app_id, pushkey=pushkey, user_id=user_id - ) - except StoreError as se: - if se.code != 404: - # This is fine: they're already unsubscribed - raise - - self.notifier.on_new_replication_data() - - respond_with_html_bytes( - request, - 200, - UnsubscribeResource.SUCCESS_HTML, - ) - - async def _async_render_POST(self, request: SynapseRequest) -> None: - """ - Handle a mail user agent POSTing to the unsubscribe URL via the - List-Unsubscribe & List-Unsubscribe-Post headers. - """ - - # TODO Assert that the body has a single field - - # Assert the body has form encoded key/value pair of - # List-Unsubscribe=One-Click. - - await self._async_render_GET(request) diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py
index d0ca8ca46b..9ce1eb6249 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py
@@ -18,12 +18,13 @@ # # import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, Tuple, cast from twisted.web.resource import Resource from twisted.web.server import Request -from synapse.http.server import set_cors_headers +from synapse.api.errors import NotFoundError +from synapse.http.server import DirectServeJsonResource from synapse.http.site import SynapseRequest from synapse.types import JsonDict from synapse.util import json_encoder @@ -38,27 +39,30 @@ logger = logging.getLogger(__name__) class WellKnownBuilder: def __init__(self, hs: "HomeServer"): self._config = hs.config + self._auth = hs.get_auth() - def get_well_known(self) -> Optional[JsonDict]: + async def get_well_known(self) -> Optional[JsonDict]: if not self._config.server.serve_client_wellknown: return None result = {"m.homeserver": {"base_url": self._config.server.public_baseurl}} - if self._config.registration.default_identity_server: - result["m.identity_server"] = { - "base_url": self._config.registration.default_identity_server - } - # We use the MSC3861 values as they are used by multiple MSCs if self._config.experimental.msc3861.enabled: + # If MSC3861 is enabled, we can assume self._auth is an instance of MSC3861DelegatedAuth + # We import lazily here because of the authlib requirement + from synapse.api.auth.msc3861_delegated import MSC3861DelegatedAuth + + auth = cast(MSC3861DelegatedAuth, self._auth) + result["org.matrix.msc2965.authentication"] = { - "issuer": self._config.experimental.msc3861.issuer + "issuer": await auth.issuer(), } - if self._config.experimental.msc3861.account_management_url is not None: - result["org.matrix.msc2965.authentication"][ - "account" - ] = self._config.experimental.msc3861.account_management_url + account_management_url = await auth.account_management_url() + if account_management_url is not None: + result["org.matrix.msc2965.authentication"]["account"] = ( + account_management_url + ) if self._config.server.extra_well_known_client_content: for ( @@ -71,26 +75,22 @@ class WellKnownBuilder: return result -class ClientWellKnownResource(Resource): +class ClientWellKnownResource(DirectServeJsonResource): """A Twisted web resource which renders the .well-known/matrix/client file""" isLeaf = 1 def __init__(self, hs: "HomeServer"): - Resource.__init__(self) + super().__init__() self._well_known_builder = WellKnownBuilder(hs) - def render_GET(self, request: SynapseRequest) -> bytes: - set_cors_headers(request) - r = self._well_known_builder.get_well_known() + async def _async_render_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + r = await self._well_known_builder.get_well_known() if not r: - request.setResponseCode(404) - request.setHeader(b"Content-Type", b"text/plain") - return b".well-known not available" + raise NotFoundError(".well-known not available") logger.debug("returning: %s", r) - request.setHeader(b"Content-Type", b"application/json") - return json_encoder.encode(r).encode("utf-8") + return 200, r class ServerWellKnownResource(Resource): diff --git a/synapse/server.py b/synapse/server.py
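Note on the well_known.py change above: the client well-known is now built asynchronously so that, under MSC3861, the issuer and account-management URL can be fetched from the delegated auth provider at request time. Illustrative shape of the resulting document (all URLs are placeholders):

well_known = {
    "m.homeserver": {"base_url": "https://matrix.example.com/"},
    "org.matrix.msc2965.authentication": {
        "issuer": "https://auth.example.com/",
        # Present only when the provider advertises an account-management URL.
        "account": "https://auth.example.com/account",
    },
}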
index 46b9d83a04..03cca9eeed 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -34,6 +34,7 @@ from typing_extensions import TypeAlias from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port +from twisted.python.threadpool import ThreadPool from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource @@ -65,8 +66,8 @@ from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.admin import AdminHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, PasswordAuthProvider -from synapse.handlers.cas import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler +from synapse.handlers.delayed_events import DelayedEventsHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler from synapse.handlers.directory import DirectoryHandler @@ -76,7 +77,6 @@ from synapse.handlers.event_auth import EventAuthHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.federation import FederationHandler from synapse.handlers.federation_event import FederationEventHandler -from synapse.handlers.identity import IdentityHandler from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler @@ -105,9 +105,9 @@ from synapse.handlers.room_member import ( RoomMemberMasterHandler, ) from synapse.handlers.room_member_worker import RoomMemberWorkerHandler +from synapse.handlers.room_policy import RoomPolicyHandler from synapse.handlers.room_summary import RoomSummaryHandler from synapse.handlers.search import SearchHandler -from synapse.handlers.send_email import SendEmailHandler from synapse.handlers.set_password import SetPasswordHandler from synapse.handlers.sliding_sync import SlidingSyncHandler from synapse.handlers.sso import SsoHandler @@ -123,6 +123,7 @@ from synapse.http.client import ( ) from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.media.media_repository import MediaRepository +from synapse.metrics import register_threadpool from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi from synapse.module_api.callbacks import ModuleApiCallbacks @@ -160,7 +161,6 @@ if TYPE_CHECKING: from synapse.handlers.jwt import JwtHandler from synapse.handlers.oidc import OidcHandler - from synapse.handlers.saml import SamlHandler from synapse.storage._base import SQLBaseStore @@ -246,9 +246,12 @@ class HomeServer(metaclass=abc.ABCMeta): """ REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "admin", "account_validity", "auth", "deactivate_account", + "delayed_events", + "e2e_keys", # for the `delete_old_otks` scheduled-task handler "message", "pagination", "profile", @@ -385,7 +388,7 @@ class HomeServer(metaclass=abc.ABCMeta): def is_mine(self, domain_specific_string: DomainSpecificString) -> bool: return domain_specific_string.domain == self.hostname - def is_mine_id(self, string: str) -> bool: + def 
is_mine_id(self, user_id: str) -> bool: """Determines whether a user ID or room alias originates from this homeserver. Returns: @@ -393,7 +396,7 @@ class HomeServer(metaclass=abc.ABCMeta): homeserver. `False` otherwise, or if the user ID or room alias is malformed. """ - localpart_hostname = string.split(":", 1) + localpart_hostname = user_id.split(":", 1) if len(localpart_hostname) < 2: return False return localpart_hostname[1] == self.hostname @@ -633,10 +636,6 @@ class HomeServer(metaclass=abc.ABCMeta): return FederationEventHandler(self) @cache_in_self - def get_identity_handler(self) -> IdentityHandler: - return IdentityHandler(self) - - @cache_in_self def get_initial_sync_handler(self) -> InitialSyncHandler: return InitialSyncHandler(self) @@ -657,10 +656,6 @@ class HomeServer(metaclass=abc.ABCMeta): return SearchHandler(self) @cache_in_self - def get_send_email_handler(self) -> SendEmailHandler: - return SendEmailHandler(self) - - @cache_in_self def get_set_password_handler(self) -> SetPasswordHandler: return SetPasswordHandler(self) @@ -786,22 +781,16 @@ class HomeServer(metaclass=abc.ABCMeta): return AccountValidityHandler(self) @cache_in_self - def get_cas_handler(self) -> CasHandler: - return CasHandler(self) - - @cache_in_self - def get_saml_handler(self) -> "SamlHandler": - from synapse.handlers.saml import SamlHandler - - return SamlHandler(self) - - @cache_in_self def get_oidc_handler(self) -> "OidcHandler": from synapse.handlers.oidc import OidcHandler return OidcHandler(self) @cache_in_self + def get_room_policy_handler(self) -> RoomPolicyHandler: + return RoomPolicyHandler(self) + + @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: return EventClientSerializer(self) @@ -941,3 +930,28 @@ class HomeServer(metaclass=abc.ABCMeta): @cache_in_self def get_task_scheduler(self) -> TaskScheduler: return TaskScheduler(self) + + @cache_in_self + def get_media_sender_thread_pool(self) -> ThreadPool: + """Fetch the threadpool used to read files when responding to media + download requests.""" + + # We can choose a large threadpool size as these threads predominately + # do IO rather than CPU work. + media_threadpool = ThreadPool( + name="media_threadpool", minthreads=1, maxthreads=50 + ) + + media_threadpool.start() + self.get_reactor().addSystemEventTrigger( + "during", "shutdown", media_threadpool.stop + ) + + # Register the threadpool with our metrics. + register_threadpool("media", media_threadpool) + + return media_threadpool + + @cache_in_self + def get_delayed_events_handler(self) -> DelayedEventsHandler: + return DelayedEventsHandler(self) diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
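The `is_mine_id` parameter rename above (`string` to `user_id`) doesn't change behaviour: everything after the first `:` is treated as the server name, and a malformed ID is simply reported as not local rather than raising. A standalone sketch of the same logic:

```python
def is_mine_id(user_id: str, hostname: str = "example.com") -> bool:
    """Sketch of HomeServer.is_mine_id: split on the first ':' and compare
    the server-name part against this server's hostname."""
    localpart_hostname = user_id.split(":", 1)
    if len(localpart_hostname) < 2:
        return False  # malformed IDs are treated as "not mine"
    return localpart_hostname[1] == hostname

assert is_mine_id("@alice:example.com")
assert not is_mine_id("@alice:elsewhere.org")
assert not is_mine_id("no-colon-here")
```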
index f6ea90bd4f..e88e8c9b45 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -119,7 +119,9 @@ class ResourceLimitsServerNotices:
             elif not currently_blocked and limit_msg:
                 # Room is not notifying of a block, when it ought to be.
                 await self._apply_limit_block_notification(
-                    user_id, limit_msg, limit_type  # type: ignore
+                    user_id,
+                    limit_msg,
+                    limit_type,  # type: ignore
                 )
         except SynapseError as e:
             logger.error("Error sending resource limits server notice: %s", e)
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 72b291889b..9e48e09270 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -59,11 +59,13 @@ from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import Measure, measure_func +from synapse.util.stringutils import shortstr if TYPE_CHECKING: from synapse.server import HomeServer from synapse.storage.controllers import StateStorageController from synapse.storage.databases.main import DataStore + from synapse.storage.databases.state.deletion import StateDeletionDataStore logger = logging.getLogger(__name__) metrics_logger = logging.getLogger("synapse.state.metrics") @@ -194,6 +196,8 @@ class StateHandler: self._storage_controllers = hs.get_storage_controllers() self._events_shard_config = hs.config.worker.events_shard_config self._instance_name = hs.get_instance_name() + self._state_store = hs.get_datastores().state + self._state_deletion_store = hs.get_datastores().state_deletion self._update_current_state_client = ( ReplicationUpdateCurrentStateRestServlet.make_client(hs) @@ -355,6 +359,28 @@ class StateHandler: await_full_state=False, ) + # Ensure we still have the state groups we're relying on, and bump + # their usage time to avoid them being deleted from under us. + if entry.state_group: + missing_state_group = await self._state_deletion_store.check_state_groups_and_bump_deletion( + {entry.state_group} + ) + if missing_state_group: + raise Exception(f"Missing state group: {entry.state_group}") + elif entry.prev_group: + # We only rely on the prev group when persisting the event if we + # don't have an `entry.state_group`. + missing_state_group = await self._state_deletion_store.check_state_groups_and_bump_deletion( + {entry.prev_group} + ) + + if missing_state_group: + # If we're missing the prev group then we can just clear the + # entries, and rely on `entry._state` (which must exist if + # `entry.state_group` is None) + entry.prev_group = None + entry.delta_ids = None + state_group_before_event_prev_group = entry.prev_group deltas_to_state_group_before_event = entry.delta_ids state_ids_before_event = None @@ -475,7 +501,10 @@ class StateHandler: @trace @measure_func() async def resolve_state_groups_for_events( - self, room_id: str, event_ids: StrCollection, await_full_state: bool = True + self, + room_id: str, + event_ids: StrCollection, + await_full_state: bool = True, ) -> _StateCacheEntry: """Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. @@ -511,6 +540,7 @@ class StateHandler: ) = await self._state_storage_controller.get_state_group_delta( state_group_id ) + return _StateCacheEntry( state=None, state_group=state_group_id, @@ -531,7 +561,9 @@ class StateHandler: room_version, state_to_resolve, None, - state_res_store=StateResolutionStore(self.store), + state_res_store=StateResolutionStore( + self.store, self._state_deletion_store + ), ) return result @@ -663,7 +695,25 @@ class StateResolutionHandler: async with self.resolve_linearizer.queue(group_names): cache = self._state_cache.get(group_names, None) if cache: - return cache + # Check that the returned cache entry doesn't point to deleted + # state groups. 
+ state_groups_to_check = set() + if cache.state_group is not None: + state_groups_to_check.add(cache.state_group) + + if cache.prev_group is not None: + state_groups_to_check.add(cache.prev_group) + + missing_state_groups = await state_res_store.state_deletion_store.check_state_groups_and_bump_deletion( + state_groups_to_check + ) + + if not missing_state_groups: + return cache + else: + # There are missing state groups, so let's remove the stale + # entry and continue as if it was a cache miss. + self._state_cache.pop(group_names, None) logger.info( "Resolving state for %s with groups %s", @@ -671,6 +721,16 @@ class StateResolutionHandler: list(group_names), ) + # We double check that none of the state groups have been deleted. + # They shouldn't be as all these state groups should be referenced. + missing_state_groups = await state_res_store.state_deletion_store.check_state_groups_and_bump_deletion( + group_names + ) + if missing_state_groups: + raise Exception( + f"State groups have been deleted: {shortstr(missing_state_groups)}" + ) + state_groups_histogram.observe(len(state_groups_ids)) new_state = await self.resolve_events_with_store( @@ -884,7 +944,8 @@ class StateResolutionStore: in well defined way. """ - store: "DataStore" + main_store: "DataStore" + state_deletion_store: "StateDeletionDataStore" def get_events( self, event_ids: StrCollection, allow_rejected: bool = False @@ -899,7 +960,7 @@ class StateResolutionStore: An awaitable which resolves to a dict from event_id to event. """ - return self.store.get_events( + return self.main_store.get_events( event_ids, redact_behaviour=EventRedactBehaviour.as_is, get_prev_content=False, @@ -920,4 +981,4 @@ class StateResolutionStore: An awaitable that resolves to a set of event IDs. """ - return self.store.get_auth_chain_difference(room_id, state_sets) + return self.main_store.get_auth_chain_difference(room_id, state_sets) diff --git a/synapse/state/v2.py b/synapse/state/v2.py
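The state/__init__.py changes above guard against state groups being deleted out from under in-flight work: `check_state_groups_and_bump_deletion` returns the subset of groups that are already missing and bumps the deletion timers of the rest, and cached resolution results are now revalidated before use. A sketch of that revalidation pattern under those assumptions (not the real implementation):

```python
from typing import Any, Awaitable, Callable, Dict, FrozenSet, Optional, Set

async def get_validated_entry(
    cache: Dict[FrozenSet[int], Any],
    key: FrozenSet[int],
    check_and_bump: Callable[[Set[int]], Awaitable[Set[int]]],
) -> Optional[Any]:
    """Return a cached entry only if its state groups still exist.

    `check_and_bump` stands in for check_state_groups_and_bump_deletion:
    it returns the groups that are missing and bumps the deletion timers
    of the ones still present.
    """
    entry = cache.get(key)
    if entry is None:
        return None
    groups = {g for g in (entry.state_group, entry.prev_group) if g is not None}
    if not await check_and_bump(groups):
        return entry
    cache.pop(key, None)  # stale: fall back to a fresh resolution
    return None
```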
index da926ad146..d0c0a9fc96 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -29,15 +29,15 @@ from typing import (
     Generator,
     Iterable,
     List,
+    Literal,
     Optional,
+    Protocol,
     Sequence,
     Set,
     Tuple,
     overload,
 )
 
-from typing_extensions import Literal, Protocol
-
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index e12ab94576..b5fe7dd858 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -23,8 +23,11 @@ import logging from abc import ABCMeta from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union -from synapse.storage.database import make_in_list_sql_clause # noqa: F401; noqa: F401 -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + make_in_list_sql_clause, # noqa: F401 +) from synapse.types import get_domain_from_id from synapse.util import json_decoder from synapse.util.caches.descriptors import CachedFunction @@ -83,7 +86,9 @@ class SQLBaseStore(metaclass=ABCMeta): """ def _invalidate_state_caches( - self, room_id: str, members_changed: Collection[str] + self, + room_id: str, + members_changed: Collection[str], ) -> None: """Invalidates caches that are based on the current state, but does not stream invalidations down replication. @@ -109,6 +114,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache( "get_number_joined_users_in_room", (room_id,) ) + self._attempt_to_invalidate_cache("get_member_counts", (room_id,)) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) # There's no easy way of invalidating this cache for just the users @@ -123,12 +129,18 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache( "_get_rooms_for_local_user_where_membership_is_inner", (user_id,) ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", (user_id,) + ) # Purge other caches based on room state. self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_partial_current_state_ids", (room_id,)) self._attempt_to_invalidate_cache("get_room_type", (room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) def _invalidate_state_caches_all(self, room_id: str) -> None: """Invalidates caches that are based on the current state, but does @@ -147,6 +159,7 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_current_hosts_in_room", (room_id,)) self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) self._attempt_to_invalidate_cache("get_number_joined_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache("get_member_counts", (room_id,)) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) self._attempt_to_invalidate_cache("get_user_in_room_with_profile", None) @@ -157,6 +170,9 @@ class SQLBaseStore(metaclass=ABCMeta): self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_room_type", (room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) def _attempt_to_invalidate_cache( self, cache_name: str, key: Optional[Collection[Any]] diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index f473294070..d170bbddaa 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -40,20 +40,15 @@ from typing import ( import attr -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from synapse._pydantic_compat import BaseModel from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection, Cursor -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection from synapse.util import Clock, json_encoder from . import engines -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel -else: - from pydantic import BaseModel - if TYPE_CHECKING: from synapse.server import HomeServer from synapse.storage.database import ( @@ -487,6 +482,31 @@ class BackgroundUpdater: return not update_exists + async def have_completed_background_updates( + self, update_names: StrCollection + ) -> bool: + """Return the name of background updates that have not yet been + completed""" + if self._all_done: + return True + + # We now check if we have completed all pending background updates. We + # do this as once this returns True then it will set `self._all_done` + # and we can skip checking the database in future. + if await self.has_completed_background_updates(): + return True + + rows = await self.db_pool.simple_select_many_batch( + table="background_updates", + column="update_name", + iterable=update_names, + retcols=("update_name",), + desc="get_uncompleted_background_updates", + ) + + # If we find any rows then we've not completed the update. + return not bool(rows) + async def do_next_background_update(self, sleep: bool = True) -> bool: """Does some amount of work on the next queued background update @@ -719,9 +739,9 @@ class BackgroundUpdater: c.execute(sql) async def updater(progress: JsonDict, batch_size: int) -> int: - assert isinstance( - self.db_pool.engine, engines.PostgresEngine - ), "validate constraint background update registered for non-Postres database" + assert isinstance(self.db_pool.engine, engines.PostgresEngine), ( + "validate constraint background update registered for non-Postres database" + ) logger.info("Validating constraint %s to %s", constraint_name, table) await self.db_pool.runWithConnection(runner) @@ -769,7 +789,7 @@ class BackgroundUpdater: # we may already have a half-built index. Let's just drop it # before trying to create it again. - sql = "DROP INDEX IF EXISTS %s" % (index_name,) + sql = "DROP INDEX CONCURRENTLY IF EXISTS %s" % (index_name,) logger.debug("[SQL] %s", sql) c.execute(sql) @@ -794,7 +814,7 @@ class BackgroundUpdater: if replaces_index is not None: # We drop the old index as the new index has now been created. - sql = f"DROP INDEX IF EXISTS {replaces_index}" + sql = f"DROP INDEX CONCURRENTLY IF EXISTS {replaces_index}" logger.debug("[SQL] %s", sql) c.execute(sql) finally: @@ -880,9 +900,9 @@ class BackgroundUpdater: on the table. Used to iterate over the table. """ - assert isinstance( - self.db_pool.engine, engines.PostgresEngine - ), "validate constraint background update registered for non-Postres database" + assert isinstance(self.db_pool.engine, engines.PostgresEngine), ( + "validate constraint background update registered for non-Postres database" + ) async def updater(progress: JsonDict, batch_size: int) -> int: return await self.validate_constraint_and_delete_in_background( diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py
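In the background_updates.py hunk above, note that the new `have_completed_background_updates` returns a bool even though its docstring promises names: it is True iff none of the given update names still has a row in the `background_updates` table. The index-dropping SQL also gains `CONCURRENTLY`, which on Postgres avoids holding an ACCESS EXCLUSIVE lock on the parent table while the index is removed. A toy model of the new helper's contract:

```python
import asyncio
from typing import AbstractSet, Collection

async def have_completed(pending: AbstractSet[str], names: Collection[str]) -> bool:
    """Model of the new helper: True iff no named update is still pending."""
    rows = [name for name in names if name in pending]  # the simple_select_many_batch
    return not rows

async def main() -> None:
    pending = {"populate_example_index"}  # hypothetical in-flight update
    assert not await have_completed(pending, ["populate_example_index", "other"])
    assert await have_completed(pending, ["other"])

asyncio.run(main())
```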
index d0e015bf19..f5131fe291 100644
--- a/synapse/storage/controllers/persist_events.py
+++ b/synapse/storage/controllers/persist_events.py
@@ -332,6 +332,7 @@ class EventsPersistenceStorageController: # store for now. self.main_store = stores.main self.state_store = stores.state + self._state_deletion_store = stores.state_deletion assert stores.persist_events self.persist_events_store = stores.persist_events @@ -416,7 +417,7 @@ class EventsPersistenceStorageController: set_tag(SynapseTags.FUNC_ARG_PREFIX + "backfilled", str(backfilled)) async def enqueue( - item: Tuple[str, List[Tuple[EventBase, EventContext]]] + item: Tuple[str, List[Tuple[EventBase, EventContext]]], ) -> Dict[str, str]: room_id, evs_ctxs = item return await self._event_persist_queue.add_to_queue( @@ -502,8 +503,15 @@ class EventsPersistenceStorageController: """ state = await self._calculate_current_state(room_id) delta = await self._calculate_state_delta(room_id, state) + sliding_sync_table_changes = ( + await self.persist_events_store._calculate_sliding_sync_table_changes( + room_id, [], delta + ) + ) - await self.persist_events_store.update_current_state(room_id, delta) + await self.persist_events_store.update_current_state( + room_id, delta, sliding_sync_table_changes + ) async def _calculate_current_state(self, room_id: str) -> StateMap[str]: """Calculate the current state of a room, based on the forward extremities @@ -542,7 +550,9 @@ class EventsPersistenceStorageController: room_version, state_maps_by_state_group, event_map=None, - state_res_store=StateResolutionStore(self.main_store), + state_res_store=StateResolutionStore( + self.main_store, self._state_deletion_store + ), ) return await res.get_state(self._state_controller, StateFilter.all()) @@ -628,15 +638,20 @@ class EventsPersistenceStorageController: room_id, [e for e, _ in chunk] ) - await self.persist_events_store._persist_events_and_state_updates( - room_id, - chunk, - state_delta_for_room=state_delta_for_room, - new_forward_extremities=new_forward_extremities, - use_negative_stream_ordering=backfilled, - inhibit_local_membership_updates=backfilled, - new_event_links=new_event_links, - ) + # Stop the state groups from being deleted while we're persisting + # them. + async with self._state_deletion_store.persisting_state_group_references( + events_and_contexts + ): + await self.persist_events_store._persist_events_and_state_updates( + room_id, + chunk, + state_delta_for_room=state_delta_for_room, + new_forward_extremities=new_forward_extremities, + use_negative_stream_ordering=backfilled, + inhibit_local_membership_updates=backfilled, + new_event_links=new_event_links, + ) return replaced_events @@ -785,9 +800,9 @@ class EventsPersistenceStorageController: ) # Remove any events which are prev_events of any existing events. - existing_prevs: Collection[str] = ( - await self.persist_events_store._get_events_which_are_prevs(result) - ) + existing_prevs: Collection[ + str + ] = await self.persist_events_store._get_events_which_are_prevs(result) result.difference_update(existing_prevs) # Finally handle the case where the new events have soft-failed prev @@ -855,8 +870,7 @@ class EventsPersistenceStorageController: # This should only happen for outlier events. 
if not ev.internal_metadata.is_outlier(): raise Exception( - "Context for new event %s has no state " - "group" % (ev.event_id,) + "Context for new event %s has no state group" % (ev.event_id,) ) continue if ctx.state_group_deltas: @@ -958,7 +972,9 @@ class EventsPersistenceStorageController: room_version, state_groups, events_map, - state_res_store=StateResolutionStore(self.main_store), + state_res_store=StateResolutionStore( + self.main_store, self._state_deletion_store + ), ) state_resolutions_during_persistence.inc() diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py
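The persist-events controller above now wraps `_persist_events_and_state_updates` in `persisting_state_group_references`, an async context manager that pins the referenced state groups for the duration of the write. A minimal sketch of that shape, with an in-memory set standing in for the deletion store:

```python
import contextlib
from typing import AsyncIterator, Set

PINNED_STATE_GROUPS: Set[int] = set()  # stand-in for the deletion store

@contextlib.asynccontextmanager
async def persisting_state_group_references(groups: Set[int]) -> AsyncIterator[None]:
    """Pin state groups while events referencing them are being written."""
    PINNED_STATE_GROUPS.update(groups)  # the deletion loop must skip these
    try:
        yield
    finally:
        PINNED_STATE_GROUPS.difference_update(groups)
```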
index e794b370c2..c2d4bf8290 100644
--- a/synapse/storage/controllers/purge_events.py
+++ b/synapse/storage/controllers/purge_events.py
@@ -21,10 +21,19 @@ import itertools import logging -from typing import TYPE_CHECKING, Set +from typing import ( + TYPE_CHECKING, + Collection, + Mapping, + Optional, + Set, +) from synapse.logging.context import nested_logging_context +from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.storage.database import LoggingTransaction from synapse.storage.databases import Databases +from synapse.types.storage import _BackgroundUpdates if TYPE_CHECKING: from synapse.server import HomeServer @@ -38,12 +47,22 @@ class PurgeEventsStorageController: def __init__(self, hs: "HomeServer", stores: Databases): self.stores = stores + if hs.config.worker.run_background_tasks: + self._delete_state_loop_call = hs.get_clock().looping_call( + self._delete_state_groups_loop, 60 * 1000 + ) + + self.stores.state.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE, + self._background_delete_unrefereneced_state_groups, + ) + async def purge_room(self, room_id: str) -> None: """Deletes all record of a room""" with nested_logging_context(room_id): - state_groups_to_delete = await self.stores.main.purge_room(room_id) - await self.stores.state.purge_room_state(room_id, state_groups_to_delete) + await self.stores.main.purge_room(room_id) + await self.stores.state.purge_room_state(room_id) async def purge_history( self, room_id: str, token: str, delete_local_events: bool @@ -68,11 +87,16 @@ class PurgeEventsStorageController: logger.info("[purge] finding state groups that can be deleted") sg_to_delete = await self._find_unreferenced_groups(state_groups) - await self.stores.state.purge_unreferenced_state_groups( - room_id, sg_to_delete + # Mark these state groups as pending deletion, they will actually + # get deleted automatically later. + await self.stores.state_deletion.mark_state_groups_as_pending_deletion( + sg_to_delete ) - async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]: + async def _find_unreferenced_groups( + self, + state_groups: Collection[int], + ) -> Set[int]: """Used when purging history to figure out which state groups can be deleted. @@ -118,6 +142,307 @@ class PurgeEventsStorageController: next_to_search |= prevs state_groups_seen |= prevs + # We also check to see if anything referencing the state groups are + # also unreferenced. This helps ensure that we delete unreferenced + # state groups, if we don't then we will de-delta them when we + # delete the other state groups leading to increased DB usage. + next_edges = await self.stores.state.get_next_state_groups(current_search) + nexts = set(next_edges.keys()) + nexts -= state_groups_seen + next_to_search |= nexts + state_groups_seen |= nexts + to_delete = state_groups_seen - referenced_groups return to_delete + + @wrap_as_background_process("_delete_state_groups_loop") + async def _delete_state_groups_loop(self) -> None: + """Background task that deletes any state groups that may be pending + deletion.""" + + while True: + next_to_delete = await self.stores.state_deletion.get_next_state_group_collection_to_delete() + if next_to_delete is None: + break + + (room_id, groups_to_sequences) = next_to_delete + made_progress = await self._delete_state_groups( + room_id, groups_to_sequences + ) + + # If no progress was made in deleting the state groups, then we + # break to allow a pause before trying again next time we get + # called. 
+ if not made_progress: + break + + async def _delete_state_groups( + self, room_id: str, groups_to_sequences: Mapping[int, int] + ) -> bool: + """Tries to delete the given state groups. + + Returns: + Whether we made progress in deleting the state groups (or marking + them as referenced). + """ + + # We double check if any of the state groups have become referenced. + # This shouldn't happen, as any usages should cause the state group to + # be removed as pending deletion. + referenced_state_groups = await self.stores.main.get_referenced_state_groups( + groups_to_sequences + ) + + if referenced_state_groups: + # We mark any state groups that have become referenced as being + # used. + await self.stores.state_deletion.mark_state_groups_as_used( + referenced_state_groups + ) + + # Update list of state groups to remove referenced ones + groups_to_sequences = { + state_group: sequence_number + for state_group, sequence_number in groups_to_sequences.items() + if state_group not in referenced_state_groups + } + + if not groups_to_sequences: + # We made progress here as long as we marked some state groups as + # now referenced. + return len(referenced_state_groups) > 0 + + return await self.stores.state.purge_unreferenced_state_groups( + room_id, + groups_to_sequences, + ) + + async def _background_delete_unrefereneced_state_groups( + self, progress: dict, batch_size: int + ) -> int: + """This background update will slowly delete any unreferenced state groups""" + + last_checked_state_group = progress.get("last_checked_state_group") + + if last_checked_state_group is None: + # This is the first run. + last_checked_state_group = ( + await self.stores.state.db_pool.simple_select_one_onecol( + table="state_groups", + keyvalues={}, + retcol="MAX(id)", + allow_none=True, + desc="get_max_state_group", + ) + ) + if last_checked_state_group is None: + # There are no state groups so the background process is finished. + await self.stores.state.db_pool.updates._end_background_update( + _BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE + ) + return batch_size + last_checked_state_group += 1 + + ( + last_checked_state_group, + final_batch, + ) = await self._delete_unreferenced_state_groups_batch( + last_checked_state_group, + batch_size, + ) + + if not final_batch: + # There are more state groups to check. + progress = { + "last_checked_state_group": last_checked_state_group, + } + await self.stores.state.db_pool.updates._background_update_progress( + _BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE, + progress, + ) + else: + # This background process is finished. + await self.stores.state.db_pool.updates._end_background_update( + _BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE + ) + + return batch_size + + async def _delete_unreferenced_state_groups_batch( + self, + last_checked_state_group: int, + batch_size: int, + ) -> tuple[int, bool]: + """Looks for unreferenced state groups starting from the last state group + checked and marks them for deletion. + + Args: + last_checked_state_group: The last state group that was checked. + batch_size: How many state groups to process in this iteration. + + Returns: + (last_checked_state_group, final_batch) + """ + + # Find all state groups that can be deleted if any of the original set are deleted. 
+ ( + to_delete, + last_checked_state_group, + final_batch, + ) = await self._find_unreferenced_groups_for_background_deletion( + last_checked_state_group, batch_size + ) + + if len(to_delete) == 0: + return last_checked_state_group, final_batch + + await self.stores.state_deletion.mark_state_groups_as_pending_deletion( + to_delete + ) + + return last_checked_state_group, final_batch + + async def _find_unreferenced_groups_for_background_deletion( + self, + last_checked_state_group: int, + batch_size: int, + ) -> tuple[Set[int], int, bool]: + """Used when deleting unreferenced state groups in the background to figure out + which state groups can be deleted. + To avoid increased DB usage due to de-deltaing state groups, this returns only + state groups which are free standing (ie. no shared edges with referenced groups) or + state groups which do not share edges which result in a future referenced group. + + The following scenarios outline the possibilities based on state group data in + the DB. + + ie. Free standing -> state groups 1-N would be returned: + SG_1 + | + ... + | + SG_N + + ie. Previous reference -> state groups 2-N would be returned: + SG_1 <- referenced by event + | + SG_2 + | + ... + | + SG_N + + ie. Future reference -> none of the following state groups would be returned: + SG_1 + | + SG_2 + | + ... + | + SG_N <- referenced by event + + Args: + last_checked_state_group: The last state group that was checked. + batch_size: How many state groups to process in this iteration. + + Returns: + (to_delete, last_checked_state_group, final_batch) + """ + + # If a state group's next edge is not pending deletion then we don't delete the state group. + # If there is no next edge or the next edges are all marked for deletion, then delete + # the state group. + # This holds since we walk backwards from the latest state groups, ensuring that + # we've already checked newer state groups for event references along the way. + def get_next_state_groups_marked_for_deletion_txn( + txn: LoggingTransaction, + ) -> tuple[dict[int, bool], dict[int, int]]: + state_group_sql = """ + SELECT s.id, e.state_group, d.state_group + FROM ( + SELECT id FROM state_groups + WHERE id < ? ORDER BY id DESC LIMIT ? + ) as s + LEFT JOIN state_group_edges AS e ON (s.id = e.prev_state_group) + LEFT JOIN state_groups_pending_deletion AS d ON (e.state_group = d.state_group) + """ + txn.execute(state_group_sql, (last_checked_state_group, batch_size)) + + # Mapping from state group to whether we should delete it. + state_groups_to_deletion: dict[int, bool] = {} + + # Mapping from state group to prev state group. + state_groups_to_prev: dict[int, int] = {} + + for row in txn: + state_group = row[0] + next_edge = row[1] + pending_deletion = row[2] + + if next_edge is not None: + state_groups_to_prev[next_edge] = state_group + + if next_edge is not None and not pending_deletion: + # We have found an edge not marked for deletion. + # Check previous results to see if this group is part of a chain + # within this batch that qualifies for deletion. + # ie. batch contains: + # SG_1 -> SG_2 -> SG_3 + # If SG_3 is a candidate for deletion, then SG_2 & SG_1 should also + # be, even though they have edges which may not be marked for + # deletion. + # This relies on SQL results being sorted in DESC order to work. 
+ next_is_deletion_candidate = state_groups_to_deletion.get(next_edge) + if ( + next_is_deletion_candidate is None + or not next_is_deletion_candidate + ): + state_groups_to_deletion[state_group] = False + else: + state_groups_to_deletion.setdefault(state_group, True) + else: + # This state group may be a candidate for deletion + state_groups_to_deletion.setdefault(state_group, True) + + return state_groups_to_deletion, state_groups_to_prev + + ( + state_groups_to_deletion, + state_group_edges, + ) = await self.stores.state.db_pool.runInteraction( + "get_next_state_groups_marked_for_deletion", + get_next_state_groups_marked_for_deletion_txn, + ) + deletion_candidates = { + state_group + for state_group, deletion in state_groups_to_deletion.items() + if deletion + } + + final_batch = False + state_groups = state_groups_to_deletion.keys() + if len(state_groups) < batch_size: + final_batch = True + else: + last_checked_state_group = min(state_groups) + + if len(state_groups) == 0: + return set(), last_checked_state_group, final_batch + + # Determine if any of the remaining state groups are directly referenced. + referenced = await self.stores.main.get_referenced_state_groups( + deletion_candidates + ) + + # Remove state groups from deletion_candidates which are directly referenced or share a + # future edge with a referenced state group within this batch. + def filter_reference_chains(group: Optional[int]) -> None: + while group is not None: + deletion_candidates.discard(group) + group = state_group_edges.get(group) + + for referenced_group in referenced: + filter_reference_chains(referenced_group) + + return deletion_candidates, last_checked_state_group, final_batch diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py
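The chain filter at the end of the new background update above walks `state_group_edges` (which maps a group to its previous group within the batch) from each referenced group, discarding the whole chain from the deletion candidates; this is the "future reference" scenario from the docstring. A worked example:

```python
from typing import Dict, Optional, Set

def filter_reference_chains(
    candidates: Set[int], edges: Dict[int, int], referenced: Set[int]
) -> Set[int]:
    """Drop every candidate reachable from a referenced group via prev edges."""
    for group in referenced:
        g: Optional[int] = group
        while g is not None:
            candidates.discard(g)
            g = edges.get(g)
    return candidates

# Chain SG_1 <- SG_2 <- SG_3 with SG_3 referenced by an event: none of the
# three may be deleted, matching the docstring's "future reference" case.
assert filter_reference_chains({1, 2, 3}, {3: 2, 2: 1}, {3}) == set()
# Same chain with only SG_1 referenced: SG_2 and SG_3 remain deletable,
# matching the "previous reference" case.
assert filter_reference_chains({1, 2, 3}, {3: 2, 2: 1}, {1}) == {2, 3}
```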
index b50eb8868e..f28f5d7e03 100644
--- a/synapse/storage/controllers/state.py
+++ b/synapse/storage/controllers/state.py
@@ -234,8 +234,11 @@ class StateStorageController: RuntimeError if we don't have a state group for one or more of the events (ie they are outliers or unknown) """ + if state_filter is None: + state_filter = StateFilter.all() + await_full_state = True - if state_filter and not state_filter.must_await_full_state(self._is_mine_id): + if not state_filter.must_await_full_state(self._is_mine_id): await_full_state = False event_to_groups = await self.get_state_group_for_events( @@ -244,7 +247,7 @@ class StateStorageController: groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter or StateFilter.all() + groups, state_filter ) state_event_map = await self.stores.main.get_events( @@ -292,10 +295,11 @@ class StateStorageController: RuntimeError if we don't have a state group for one or more of the events (ie they are outliers or unknown) """ - if ( - await_full_state - and state_filter - and not state_filter.must_await_full_state(self._is_mine_id) + if state_filter is None: + state_filter = StateFilter.all() + + if await_full_state and not state_filter.must_await_full_state( + self._is_mine_id ): # Full state is not required if the state filter is restrictive enough. await_full_state = False @@ -306,7 +310,7 @@ class StateStorageController: groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter or StateFilter.all() + groups, state_filter ) event_to_state = { @@ -335,9 +339,10 @@ class StateStorageController: RuntimeError if we don't have a state group for the event (ie it is an outlier or is unknown) """ - state_map = await self.get_state_for_events( - [event_id], state_filter or StateFilter.all() - ) + if state_filter is None: + state_filter = StateFilter.all() + + state_map = await self.get_state_for_events([event_id], state_filter) return state_map[event_id] @trace @@ -365,9 +370,12 @@ class StateStorageController: RuntimeError if we don't have a state group for the event (ie it is an outlier or is unknown) """ + if state_filter is None: + state_filter = StateFilter.all() + state_map = await self.get_state_ids_for_events( [event_id], - state_filter or StateFilter.all(), + state_filter, await_full_state=await_full_state, ) return state_map[event_id] @@ -388,9 +396,12 @@ class StateStorageController: at the event and `state_filter` is not satisfied by partial state. Defaults to `True`. """ + if state_filter is None: + state_filter = StateFilter.all() + state_ids = await self.get_state_ids_for_event( event_id, - state_filter=state_filter or StateFilter.all(), + state_filter=state_filter, await_full_state=await_full_state, ) @@ -426,6 +437,9 @@ class StateStorageController: at the last event in the room before `stream_position` and `state_filter` is not satisfied by partial state. Defaults to `True`. """ + if state_filter is None: + state_filter = StateFilter.all() + # FIXME: This gets the state at the latest event before the stream ordering, # which might not be the same as the "current state" of the room at the time # of the stream token if there were multiple forward extremities at the time. @@ -442,7 +456,7 @@ class StateStorageController: if last_event_id: state = await self.get_state_after_event( last_event_id, - state_filter=state_filter or StateFilter.all(), + state_filter=state_filter, await_full_state=await_full_state, ) @@ -500,9 +514,10 @@ class StateStorageController: Returns: Dict of state group to state map. 
""" - return await self.stores.state._get_state_for_groups( - groups, state_filter or StateFilter.all() - ) + if state_filter is None: + state_filter = StateFilter.all() + + return await self.stores.state._get_state_for_groups(groups, state_filter) @trace @tag_args @@ -583,12 +598,13 @@ class StateStorageController: Returns: The current state of the room. """ - if await_full_state and ( - not state_filter or state_filter.must_await_full_state(self._is_mine_id) - ): + if state_filter is None: + state_filter = StateFilter.all() + + if await_full_state and state_filter.must_await_full_state(self._is_mine_id): await self._partial_state_room_tracker.await_full_state(room_id) - if state_filter and not state_filter.is_full(): + if state_filter is not None and not state_filter.is_full(): return await self.stores.main.get_partial_filtered_current_state_ids( room_id, state_filter ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 569f618193..a4941e58f6 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -35,6 +35,8 @@ from typing import ( Iterable, Iterator, List, + Literal, + Mapping, Optional, Sequence, Tuple, @@ -46,7 +48,7 @@ from typing import ( import attr from prometheus_client import Counter, Histogram -from typing_extensions import Concatenate, Literal, ParamSpec +from typing_extensions import Concatenate, ParamSpec from twisted.enterprise import adbapi from twisted.internet.interfaces import IReactorCore @@ -64,6 +66,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor, SQLQueryParameters +from synapse.types import StrCollection from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter @@ -1095,6 +1098,48 @@ class DatabasePool: txn.execute(sql, vals) + @staticmethod + def simple_insert_returning_txn( + txn: LoggingTransaction, + table: str, + values: Dict[str, Any], + returning: StrCollection, + ) -> Tuple[Any, ...]: + """Executes a `INSERT INTO... RETURNING...` statement (or equivalent for + SQLite versions that don't support it). + """ + + if txn.database_engine.supports_returning: + sql = "INSERT INTO %s (%s) VALUES(%s) RETURNING %s" % ( + table, + ", ".join(k for k in values.keys()), + ", ".join("?" for _ in values.keys()), + ", ".join(k for k in returning), + ) + + txn.execute(sql, list(values.values())) + row = txn.fetchone() + assert row is not None + return row + else: + # For old versions of SQLite we do a standard insert and then can + # use `last_insert_rowid` to get at the row we just inserted + DatabasePool.simple_insert_txn( + txn, + table=table, + values=values, + ) + txn.execute("SELECT last_insert_rowid()") + row = txn.fetchone() + assert row is not None + (rowid,) = row + + row = DatabasePool.simple_select_one_txn( + txn, table=table, keyvalues={"rowid": rowid}, retcols=returning + ) + assert row is not None + return row + async def simple_insert_many( self, table: str, @@ -1254,9 +1299,9 @@ class DatabasePool: self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: Mapping[str, Any], + values: Mapping[str, Any], + insertion_values: Optional[Mapping[str, Any]] = None, where_clause: Optional[str] = None, ) -> bool: """ @@ -1299,9 +1344,9 @@ class DatabasePool: self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: Mapping[str, Any], + values: Mapping[str, Any], + insertion_values: Optional[Mapping[str, Any]] = None, where_clause: Optional[str] = None, lock: bool = True, ) -> bool: @@ -1322,7 +1367,7 @@ class DatabasePool: if lock: # We need to lock the table :( - self.engine.lock_table(txn, table) + txn.database_engine.lock_table(txn, table) def _getwhere(key: str) -> str: # If the value we're passing in is None (aka NULL), we need to use @@ -1376,13 +1421,13 @@ class DatabasePool: # successfully inserted return True + @staticmethod def simple_upsert_txn_native_upsert( - self, txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - values: Dict[str, Any], - insertion_values: Optional[Dict[str, Any]] = None, + keyvalues: Mapping[str, Any], + values: Mapping[str, Any], + insertion_values: Optional[Mapping[str, Any]] = None, where_clause: Optional[str] = None, 
) -> bool: """ @@ -1535,8 +1580,8 @@ class DatabasePool: self.simple_upsert_txn_emulated(txn, table, _keys, _vals, lock=False) + @staticmethod def simple_upsert_many_txn_native_upsert( - self, txn: LoggingTransaction, table: str, key_names: Collection[str], @@ -1966,8 +2011,8 @@ class DatabasePool: def simple_update_txn( txn: LoggingTransaction, table: str, - keyvalues: Dict[str, Any], - updatevalues: Dict[str, Any], + keyvalues: Mapping[str, Any], + updatevalues: Mapping[str, Any], ) -> int: """ Update rows in the given database table. @@ -2115,10 +2160,26 @@ class DatabasePool: if rowcount > 1: raise StoreError(500, "More than one row matched (%s)" % (table,)) - # Ideally we could use the overload decorator here to specify that the - # return type is only optional if allow_none is True, but this does not work - # when you call a static method from an instance. - # See https://github.com/python/mypy/issues/7781 + @overload + @staticmethod + def simple_select_one_txn( + txn: LoggingTransaction, + table: str, + keyvalues: Dict[str, Any], + retcols: Collection[str], + allow_none: Literal[False] = False, + ) -> Tuple[Any, ...]: ... + + @overload + @staticmethod + def simple_select_one_txn( + txn: LoggingTransaction, + table: str, + keyvalues: Dict[str, Any], + retcols: Collection[str], + allow_none: Literal[True] = True, + ) -> Optional[Tuple[Any, ...]]: ... + @staticmethod def simple_select_one_txn( txn: LoggingTransaction, diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py
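The new `simple_insert_returning_txn` above uses `INSERT ... RETURNING` where the engine supports it, and otherwise falls back to `last_insert_rowid()` plus a follow-up select. The SQLite fallback, demonstrated standalone against an illustrative table (not a real Synapse schema):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tokens (id INTEGER PRIMARY KEY, token TEXT)")
conn.execute("INSERT INTO tokens (token) VALUES (?)", ("syt_example",))

# Recover the inserted row the way the fallback does: last_insert_rowid(),
# then a select keyed on rowid for the requested `returning` columns.
(rowid,) = conn.execute("SELECT last_insert_rowid()").fetchone()
row = conn.execute(
    "SELECT id, token FROM tokens WHERE rowid = ?", (rowid,)
).fetchone()
assert row == (1, "syt_example")
```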
index dd9fc01fb0..81886ff765 100644
--- a/synapse/storage/databases/__init__.py
+++ b/synapse/storage/databases/__init__.py
@@ -26,6 +26,7 @@ from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, make_conn from synapse.storage.databases.main.events import PersistEventsStore from synapse.storage.databases.state import StateGroupDataStore +from synapse.storage.databases.state.deletion import StateDeletionDataStore from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database @@ -49,12 +50,14 @@ class Databases(Generic[DataStoreT]): main state persist_events + state_deletion """ databases: List[DatabasePool] main: "DataStore" # FIXME: https://github.com/matrix-org/synapse/issues/11165: actually an instance of `main_store_class` state: StateGroupDataStore persist_events: Optional[PersistEventsStore] + state_deletion: StateDeletionDataStore def __init__(self, main_store_class: Type[DataStoreT], hs: "HomeServer"): # Note we pass in the main store class here as workers use a different main @@ -63,6 +66,7 @@ class Databases(Generic[DataStoreT]): self.databases = [] main: Optional[DataStoreT] = None state: Optional[StateGroupDataStore] = None + state_deletion: Optional[StateDeletionDataStore] = None persist_events: Optional[PersistEventsStore] = None for database_config in hs.config.database.databases: @@ -114,7 +118,8 @@ class Databases(Generic[DataStoreT]): if state: raise Exception("'state' data store already configured") - state = StateGroupDataStore(database, db_conn, hs) + state_deletion = StateDeletionDataStore(database, db_conn, hs) + state = StateGroupDataStore(database, db_conn, hs, state_deletion) db_conn.commit() @@ -135,7 +140,7 @@ class Databases(Generic[DataStoreT]): if not main: raise Exception("No 'main' database configured") - if not state: + if not state or not state_deletion: raise Exception("No 'state' database configured") # We use local variables here to ensure that the databases do not have @@ -143,3 +148,4 @@ class Databases(Generic[DataStoreT]): self.main = main # type: ignore[assignment] self.state = state self.persist_events = persist_events + self.state_deletion = state_deletion diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 586e84f2a4..86431f6e40 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -3,7 +3,7 @@ # # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -33,6 +33,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.stats import UserSortOrder from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor @@ -43,6 +44,7 @@ from .appservice import ApplicationServiceStore, ApplicationServiceTransactionSt from .cache import CacheInvalidationWorkerStore from .censor_events import CensorEventsStore from .client_ips import ClientIpWorkerStore +from .delayed_events import DelayedEventsStore from .deviceinbox import DeviceInboxStore from .devices import DeviceStore from .directory import DirectoryStore @@ -156,6 +158,8 @@ class DataStore( LockStore, SessionStore, TaskSchedulerWorkerStore, + SlidingSyncStore, + DelayedEventsStore, ): def __init__( self, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index 966393869b..715815cc09 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -34,6 +34,7 @@ from typing import ( ) from synapse.api.constants import AccountDataTypes +from synapse.api.errors import Codes, SynapseError from synapse.replication.tcp.streams import AccountDataStream from synapse.storage._base import db_to_json from synapse.storage.database import ( @@ -43,6 +44,7 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore +from synapse.storage.invite_rule import InviteRulesConfig from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import JsonDict, JsonMapping from synapse.util import json_encoder @@ -102,6 +104,8 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) self._delete_account_data_for_deactivated_users, ) + self._msc4155_enabled = hs.config.experimental.msc4155_enabled + def get_max_account_data_stream_id(self) -> int: """Get the current max stream ID for account data stream @@ -177,7 +181,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_room_account_data_for_user_txn( txn: LoggingTransaction, - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. @@ -194,7 +198,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) txn.execute(sql, (user_id,)) - by_room: Dict[str, Dict[str, JsonDict]] = {} + by_room: Dict[str, Dict[str, JsonMapping]] = {} for room_id, account_data_type, content in txn: room_data = by_room.setdefault(room_id, {}) @@ -394,7 +398,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Mapping[str, JsonMapping]: + ) -> Dict[str, JsonMapping]: """Get all the global account_data that's changed for a user. Args: @@ -407,7 +411,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_global_account_data_for_user( txn: LoggingTransaction, - ) -> Dict[str, JsonDict]: + ) -> Dict[str, JsonMapping]: sql = """ SELECT account_data_type, content FROM account_data WHERE user_id = ? AND stream_id > ? @@ -429,7 +433,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) async def get_updated_room_account_data_for_user( self, user_id: str, stream_id: int - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: """Get all the room account_data that's changed for a user. Args: @@ -442,14 +446,14 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) def get_updated_room_account_data_for_user_txn( txn: LoggingTransaction, - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Dict[str, Dict[str, JsonMapping]]: sql = """ SELECT room_id, account_data_type, content FROM room_account_data WHERE user_id = ? AND stream_id > ? 
""" txn.execute(sql, (user_id, stream_id)) - account_data_by_room: Dict[str, Dict[str, JsonDict]] = {} + account_data_by_room: Dict[str, Dict[str, JsonMapping]] = {} for row in txn: room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = db_to_json(row[2]) @@ -467,6 +471,56 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) get_updated_room_account_data_for_user_txn, ) + async def get_updated_room_account_data_for_user_for_room( + self, + # Since there are multiple arguments with the same type, force keyword arguments + # so people don't accidentally swap the order + *, + user_id: str, + room_id: str, + from_stream_id: int, + to_stream_id: int, + ) -> Dict[str, JsonMapping]: + """Get the room account_data that's changed for a user in a room. + + (> `from_stream_id` and <= `to_stream_id`) + + Args: + user_id: The user to get the account_data for. + room_id: The room to check + from_stream_id: The point in the stream to fetch from + to_stream_id: The point in the stream to fetch to + + Returns: + A dict of the room account data. + """ + + def get_updated_room_account_data_for_user_for_room_txn( + txn: LoggingTransaction, + ) -> Dict[str, JsonMapping]: + sql = """ + SELECT account_data_type, content FROM room_account_data + WHERE user_id = ? AND room_id = ? AND stream_id > ? AND stream_id <= ? + """ + txn.execute(sql, (user_id, room_id, from_stream_id, to_stream_id)) + + room_account_data: Dict[str, JsonMapping] = {} + for row in txn: + room_account_data[row[0]] = db_to_json(row[1]) + + return room_account_data + + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(from_stream_id) + ) + if not changed: + return {} + + return await self.db_pool.runInteraction( + "get_updated_room_account_data_for_user_for_room", + get_updated_room_account_data_for_user_for_room_txn, + ) + @cached(max_entries=5000, iterable=True) async def ignored_by(self, user_id: str) -> FrozenSet[str]: """ @@ -507,6 +561,23 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) ) ) + async def get_invite_config_for_user(self, user_id: str) -> InviteRulesConfig: + """ + Get the invite configuration for the current user. + + Args: + user_id: + """ + + if not self._msc4155_enabled: + # This equates to allowing all invites, as if the setting was off. + return InviteRulesConfig(None) + + data = await self.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG + ) + return InviteRulesConfig(data) + def process_replication_rows( self, stream_name: str, @@ -710,6 +781,9 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore) else: currently_ignored_users = set() + if user_id in currently_ignored_users: + raise SynapseError(400, "You cannot ignore yourself", Codes.INVALID_PARAM) + # If the data has not changed, nothing to do. if previously_ignored_users == currently_ignored_users: return diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py
index 63624f3e8f..9418fb6dd7 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -41,6 +41,7 @@ from synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.events import SLIDING_SYNC_RELEVANT_STATE_SET from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.util.caches.descriptors import CachedFunction @@ -218,6 +219,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): room_id = row.keys[0] members_changed = set(row.keys[1:]) self._invalidate_state_caches(room_id, members_changed) + self._curr_state_delta_stream_cache.entity_has_changed( # type: ignore[attr-defined] + room_id, token + ) + for user_id in members_changed: + self._membership_stream_cache.entity_has_changed(user_id, token) # type: ignore[attr-defined] elif row.cache_func == PURGE_HISTORY_CACHE_NAME: if row.keys is None: raise Exception( @@ -235,6 +241,35 @@ class CacheInvalidationWorkerStore(SQLBaseStore): room_id = row.keys[0] self._invalidate_caches_for_room_events(room_id) self._invalidate_caches_for_room(room_id) + self._curr_state_delta_stream_cache.entity_has_changed( # type: ignore[attr-defined] + room_id, token + ) + # Note: This code is commented out to improve cache performance. + # While uncommenting would provide complete correctness, our + # automatic forgotten room purge logic (see + # `forgotten_room_retention_period`) means this would frequently + # clear the entire cache (effectively) and probably have a noticable + # impact on the cache hit ratio. + # + # Not updating the cache here is safe because: + # + # 1. `_membership_stream_cache` is only used to indicate the + # *absence* of changes, i.e. "nothing has changed between tokens + # X and Y and so return early and don't query the database". + # 2. `_membership_stream_cache` is used when we query data from + # `current_state_delta_stream` and `room_memberships` but since + # nothing new is written to the database for those tables when + # purging/deleting a room (only deleting rows), there is nothing + # changed to care about. + # + # At worst, the cache might indicate a change at token X, at which + # point, we will query the database and discover nothing is there. + # + # Ideally, we would make it so that we could clear the cache on a + # more granular level but that's a bit complex and fiddly to do with + # room membership. + # + # self._membership_stream_cache.all_entities_changed(token) # type: ignore[attr-defined] else: self._attempt_to_invalidate_cache(row.cache_func, row.keys) @@ -271,20 +306,33 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_rooms_for_user", (data.state_key,) ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) + self._membership_stream_cache.entity_has_changed(data.state_key, token) # type: ignore[attr-defined] elif data.type == EventTypes.RoomEncryption: self._attempt_to_invalidate_cache( "get_room_encryption", (data.room_id,) ) elif data.type == EventTypes.Create: self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) + + if (data.type, data.state_key) in SLIDING_SYNC_RELEVANT_STATE_SET: + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) elif row.type == EventsStreamAllStateRow.TypeId: assert isinstance(data, EventsStreamAllStateRow) # Similar to the above, but the entire caches are invalidated. This is # unfortunate for the membership caches, but should recover quickly. 
self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) # type: ignore[attr-defined] + self._membership_stream_cache.all_entities_changed(token) # type: ignore[attr-defined] self._attempt_to_invalidate_cache("get_rooms_for_user", None) self._attempt_to_invalidate_cache("get_room_type", (data.room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (data.room_id,)) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) else: raise Exception("Unknown events stream row type %s" % (row.type,)) @@ -312,6 +360,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_unread_event_push_actions_by_room_for_user", (room_id,) ) + self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id, event_id)) + + self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,)) # The `_get_membership_from_event_id` is immutable, except for the # case where we look up an event *before* persisting it. @@ -344,6 +395,10 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "_get_rooms_for_local_user_where_membership_is_inner", (state_key,) ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", + (state_key,), + ) self._attempt_to_invalidate_cache( "did_forget", @@ -360,6 +415,11 @@ class CacheInvalidationWorkerStore(SQLBaseStore): elif etype == EventTypes.RoomEncryption: self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + if (etype, state_key) in SLIDING_SYNC_RELEVANT_STATE_SET: + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) + if relates_to: self._attempt_to_invalidate_cache( "get_relations_for_event", @@ -404,6 +464,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): ) self._attempt_to_invalidate_cache("get_relations_for_event", (room_id,)) + self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,)) + self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) self._attempt_to_invalidate_cache("get_applicable_edit", None) self._attempt_to_invalidate_cache("get_thread_id", None) @@ -413,6 +475,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "_get_rooms_for_local_user_where_membership_is_inner", None ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("get_references_for_event", None) @@ -425,6 +490,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("_get_state_group_for_event", None) self._attempt_to_invalidate_cache("get_event_ordering", None) + self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id,)) self._attempt_to_invalidate_cache("is_partial_state_event", None) self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None) @@ -450,6 +516,7 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_account_data_for_room", None) self._attempt_to_invalidate_cache("get_account_data_for_room_and_type", None) + self._attempt_to_invalidate_cache("get_tags_for_room", None) self._attempt_to_invalidate_cache("get_aliases_for_room", (room_id,)) self._attempt_to_invalidate_cache("get_latest_event_ids_in_room", (room_id,)) 
self._attempt_to_invalidate_cache("_get_forward_extremeties_for_room", None) @@ -469,6 +536,9 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache( "get_current_hosts_in_room_ordered", (room_id,) ) + self._attempt_to_invalidate_cache( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", None + ) self._attempt_to_invalidate_cache("did_forget", None) self._attempt_to_invalidate_cache("get_forgotten_rooms_for_user", None) self._attempt_to_invalidate_cache("_get_membership_from_event_id", None) @@ -476,6 +546,8 @@ class CacheInvalidationWorkerStore(SQLBaseStore): self._attempt_to_invalidate_cache("get_room_type", (room_id,)) self._attempt_to_invalidate_cache("get_room_encryption", (room_id,)) + self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,)) + # And delete state caches. self._invalidate_state_caches_all(room_id) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 4b66247640..69008804bd 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py
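The client_ips changes below are formatting-only: `%`/`.format()` interpolations are collapsed onto one line, and multi-line `assert`s are rewrapped so the parentheses enclose the message rather than the condition. A two-line illustration of why the rewrapped form is equivalent, and of the near-miss to avoid (parenthesising condition and message together creates an always-truthy tuple):

    x, y = 1, 2
    assert x == y, ("x and y should match")    # condition, then message: fails as expected
    # assert (x == y, "x and y should match")  # BUG: asserts a non-empty 2-tuple, never fails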
@@ -20,10 +20,19 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Dict, + List, + Mapping, + Optional, + Tuple, + TypedDict, + Union, + cast, +) import attr -from typing_extensions import TypedDict from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore @@ -238,9 +247,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): INNER JOIN user_ips USING (user_id, access_token, ip) GROUP BY user_id, access_token, ip HAVING count(*) > 1 - """.format( - clause - ), + """.format(clause), args, ) res = cast( @@ -373,9 +380,7 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): LIMIT ? ) c INNER JOIN user_ips AS u USING (user_id, device_id, last_seen) - """ % { - "where_clause": where_clause - } + """ % {"where_clause": where_clause} txn.execute(sql, where_args + [batch_size]) rows = cast(List[Tuple[int, str, str, str, str]], txn.fetchall()) @@ -645,9 +650,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke @wrap_as_background_process("update_client_ips") async def _update_client_ips_batch(self) -> None: - assert ( - self._update_on_this_worker - ), "This worker is not designated to update client IPs" + assert self._update_on_this_worker, ( + "This worker is not designated to update client IPs" + ) # If the DB pool has already terminated, don't try updating if not self.db_pool.is_running(): @@ -666,9 +671,9 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore, MonthlyActiveUsersWorke txn: LoggingTransaction, to_update: Mapping[Tuple[str, str, str], Tuple[str, Optional[str], int]], ) -> None: - assert ( - self._update_on_this_worker - ), "This worker is not designated to update client IPs" + assert self._update_on_this_worker, ( + "This worker is not designated to update client IPs" + ) # Keys and values for the `user_ips` upsert. user_ips_keys = [] diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py new file mode 100644
index 0000000000..c88682d55c --- /dev/null +++ b/synapse/storage/databases/main/delayed_events.py
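The new module below is a storage layer for delayed events: rows in a `delayed_events` table carry the event to send, its send time (`send_ts = creation_ts + delay`), and an `is_processed` flag. A hypothetical end-to-end driver, using only the method signatures defined in this file (`store` is assumed to expose them):

    async def send_one_minute_later(store, now_ms: int) -> None:
        # Schedule a message for one minute from now.
        delay_id, next_send_ts = await store.add_delayed_event(
            user_localpart="alice",
            device_id="ADEVICE",
            creation_ts=now_ms,
            room_id="!room:example.com",
            event_type="m.room.message",
            state_key=None,
            origin_server_ts=None,
            content={"msgtype": "m.text", "body": "sent later"},
            delay=60_000,
        )

        # A background timer firing at `next_send_ts` marks all due events as
        # processed and receives their details for sending.
        due_events, next_send_ts = await store.process_timeout_delayed_events(
            now_ms + 60_000
        )
        for details in due_events:
            # ... actually send the event, then drop the processed row:
            await store.delete_processed_delayed_event(
                details.delay_id, details.user_localpart
            )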
@@ -0,0 +1,549 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import logging +from typing import List, NewType, Optional, Tuple + +import attr + +from synapse.api.errors import NotFoundError +from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage.database import LoggingTransaction, StoreError +from synapse.storage.engines import PostgresEngine +from synapse.types import JsonDict, RoomID +from synapse.util import json_encoder, stringutils as stringutils + +logger = logging.getLogger(__name__) + + +DelayID = NewType("DelayID", str) +UserLocalpart = NewType("UserLocalpart", str) +DeviceID = NewType("DeviceID", str) +EventType = NewType("EventType", str) +StateKey = NewType("StateKey", str) + +Delay = NewType("Delay", int) +Timestamp = NewType("Timestamp", int) + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventDetails: + room_id: RoomID + type: EventType + state_key: Optional[StateKey] + origin_server_ts: Optional[Timestamp] + content: JsonDict + device_id: Optional[DeviceID] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class DelayedEventDetails(EventDetails): + delay_id: DelayID + user_localpart: UserLocalpart + + +class DelayedEventsStore(SQLBaseStore): + async def get_delayed_events_stream_pos(self) -> int: + """ + Gets the stream position of the background process to watch for state events + that target the same piece of state as any pending delayed events. + """ + return await self.db_pool.simple_select_one_onecol( + table="delayed_events_stream_pos", + keyvalues={}, + retcol="stream_id", + desc="get_delayed_events_stream_pos", + ) + + async def update_delayed_events_stream_pos(self, stream_id: Optional[int]) -> None: + """ + Updates the stream position of the background process to watch for state events + that target the same piece of state as any pending delayed events. + + Must only be used by the worker running the background process. + """ + await self.db_pool.simple_update_one( + table="delayed_events_stream_pos", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + desc="update_delayed_events_stream_pos", + ) + + async def add_delayed_event( + self, + *, + user_localpart: str, + device_id: Optional[str], + creation_ts: Timestamp, + room_id: str, + event_type: str, + state_key: Optional[str], + origin_server_ts: Optional[int], + content: JsonDict, + delay: int, + ) -> Tuple[DelayID, Timestamp]: + """ + Inserts a new delayed event in the DB. + + Returns: The generated ID assigned to the added delayed event, + and the send time of the next delayed event to be sent, + which is either the event just added or one added earlier. 
+ """ + delay_id = _generate_delay_id() + send_ts = Timestamp(creation_ts + delay) + + def add_delayed_event_txn(txn: LoggingTransaction) -> Timestamp: + self.db_pool.simple_insert_txn( + txn, + table="delayed_events", + values={ + "delay_id": delay_id, + "user_localpart": user_localpart, + "device_id": device_id, + "delay": delay, + "send_ts": send_ts, + "room_id": room_id, + "event_type": event_type, + "state_key": state_key, + "origin_server_ts": origin_server_ts, + "content": json_encoder.encode(content), + }, + ) + + next_send_ts = self._get_next_delayed_event_send_ts_txn(txn) + assert next_send_ts is not None + return next_send_ts + + next_send_ts = await self.db_pool.runInteraction( + "add_delayed_event", add_delayed_event_txn + ) + + return delay_id, next_send_ts + + async def restart_delayed_event( + self, + *, + delay_id: str, + user_localpart: str, + current_ts: Timestamp, + ) -> Timestamp: + """ + Restarts the send time of the matching delayed event, + as long as it hasn't already been marked for processing. + + Args: + delay_id: The ID of the delayed event to restart. + user_localpart: The localpart of the delayed event's owner. + current_ts: The current time, which will be used to calculate the new send time. + + Returns: The send time of the next delayed event to be sent, + which is either the event just restarted, or another one + with an earlier send time than the restarted one's new send time. + + Raises: + NotFoundError: if there is no matching delayed event. + """ + + def restart_delayed_event_txn( + txn: LoggingTransaction, + ) -> Timestamp: + txn.execute( + """ + UPDATE delayed_events + SET send_ts = ? + delay + WHERE delay_id = ? AND user_localpart = ? + AND NOT is_processed + """, + ( + current_ts, + delay_id, + user_localpart, + ), + ) + if txn.rowcount == 0: + raise NotFoundError("Delayed event not found") + + next_send_ts = self._get_next_delayed_event_send_ts_txn(txn) + assert next_send_ts is not None + return next_send_ts + + return await self.db_pool.runInteraction( + "restart_delayed_event", restart_delayed_event_txn + ) + + async def get_all_delayed_events_for_user( + self, + user_localpart: str, + ) -> List[JsonDict]: + """Returns all pending delayed events owned by the given user.""" + # TODO: Support Pagination stream API ("next_batch" field) + rows = await self.db_pool.execute( + "get_all_delayed_events_for_user", + """ + SELECT + delay_id, + room_id, + event_type, + state_key, + delay, + send_ts, + content + FROM delayed_events + WHERE user_localpart = ? AND NOT is_processed + ORDER BY send_ts + """, + user_localpart, + ) + return [ + { + "delay_id": DelayID(row[0]), + "room_id": str(RoomID.from_string(row[1])), + "type": EventType(row[2]), + **({"state_key": StateKey(row[3])} if row[3] is not None else {}), + "delay": Delay(row[4]), + "running_since": Timestamp(row[5] - row[4]), + "content": db_to_json(row[6]), + } + for row in rows + ] + + async def process_timeout_delayed_events( + self, current_ts: Timestamp + ) -> Tuple[ + List[DelayedEventDetails], + Optional[Timestamp], + ]: + """ + Marks for processing all delayed events that should have been sent prior to the provided time + that haven't already been marked as such. + + Returns: The details of all newly-processed delayed events, + and the send time of the next delayed event to be sent, if any. 
+ """ + + def process_timeout_delayed_events_txn( + txn: LoggingTransaction, + ) -> Tuple[ + List[DelayedEventDetails], + Optional[Timestamp], + ]: + sql_cols = ", ".join( + ( + "delay_id", + "user_localpart", + "room_id", + "event_type", + "state_key", + "origin_server_ts", + "send_ts", + "content", + "device_id", + ) + ) + sql_update = "UPDATE delayed_events SET is_processed = TRUE" + sql_where = "WHERE send_ts <= ? AND NOT is_processed" + sql_args = (current_ts,) + sql_order = "ORDER BY send_ts" + if isinstance(self.database_engine, PostgresEngine): + # Do this only in Postgres because: + # - SQLite's RETURNING emits rows in an arbitrary order + # - https://www.sqlite.org/lang_returning.html#limitations_and_caveats + # - SQLite does not support data-modifying statements in a WITH clause + # - https://www.sqlite.org/lang_with.html + # - https://www.postgresql.org/docs/current/queries-with.html#QUERIES-WITH-MODIFYING + txn.execute( + f""" + WITH events_to_send AS ( + {sql_update} {sql_where} RETURNING * + ) SELECT {sql_cols} FROM events_to_send {sql_order} + """, + sql_args, + ) + rows = txn.fetchall() + else: + txn.execute( + f"SELECT {sql_cols} FROM delayed_events {sql_where} {sql_order}", + sql_args, + ) + rows = txn.fetchall() + txn.execute(f"{sql_update} {sql_where}", sql_args) + assert txn.rowcount == len(rows) + + events = [ + DelayedEventDetails( + RoomID.from_string(row[2]), + EventType(row[3]), + StateKey(row[4]) if row[4] is not None else None, + # If no custom_origin_ts is set, use send_ts as the event's timestamp + Timestamp(row[5] if row[5] is not None else row[6]), + db_to_json(row[7]), + DeviceID(row[8]) if row[8] is not None else None, + DelayID(row[0]), + UserLocalpart(row[1]), + ) + for row in rows + ] + next_send_ts = self._get_next_delayed_event_send_ts_txn(txn) + return events, next_send_ts + + return await self.db_pool.runInteraction( + "process_timeout_delayed_events", process_timeout_delayed_events_txn + ) + + async def process_target_delayed_event( + self, + *, + delay_id: str, + user_localpart: str, + ) -> Tuple[ + EventDetails, + Optional[Timestamp], + ]: + """ + Marks for processing the matching delayed event, regardless of its timeout time, + as long as it has not already been marked as such. + + Args: + delay_id: The ID of the delayed event to restart. + user_localpart: The localpart of the delayed event's owner. + + Returns: The details of the matching delayed event, + and the send time of the next delayed event to be sent, if any. + + Raises: + NotFoundError: if there is no matching delayed event. + """ + + def process_target_delayed_event_txn( + txn: LoggingTransaction, + ) -> Tuple[ + EventDetails, + Optional[Timestamp], + ]: + sql_cols = ", ".join( + ( + "room_id", + "event_type", + "state_key", + "origin_server_ts", + "content", + "device_id", + ) + ) + sql_update = "UPDATE delayed_events SET is_processed = TRUE" + sql_where = "WHERE delay_id = ? AND user_localpart = ? 
AND NOT is_processed" + sql_args = (delay_id, user_localpart) + txn.execute( + ( + f"{sql_update} {sql_where} RETURNING {sql_cols}" + if self.database_engine.supports_returning + else f"SELECT {sql_cols} FROM delayed_events {sql_where}" + ), + sql_args, + ) + row = txn.fetchone() + if row is None: + raise NotFoundError("Delayed event not found") + elif not self.database_engine.supports_returning: + txn.execute(f"{sql_update} {sql_where}", sql_args) + assert txn.rowcount == 1 + + event = EventDetails( + RoomID.from_string(row[0]), + EventType(row[1]), + StateKey(row[2]) if row[2] is not None else None, + Timestamp(row[3]) if row[3] is not None else None, + db_to_json(row[4]), + DeviceID(row[5]) if row[5] is not None else None, + ) + + return event, self._get_next_delayed_event_send_ts_txn(txn) + + return await self.db_pool.runInteraction( + "process_target_delayed_event", process_target_delayed_event_txn + ) + + async def cancel_delayed_event( + self, + *, + delay_id: str, + user_localpart: str, + ) -> Optional[Timestamp]: + """ + Cancels the matching delayed event, i.e. remove it as long as it hasn't been processed. + + Args: + delay_id: The ID of the delayed event to restart. + user_localpart: The localpart of the delayed event's owner. + + Returns: The send time of the next delayed event to be sent, if any. + + Raises: + NotFoundError: if there is no matching delayed event. + """ + + def cancel_delayed_event_txn( + txn: LoggingTransaction, + ) -> Optional[Timestamp]: + try: + self.db_pool.simple_delete_one_txn( + txn, + table="delayed_events", + keyvalues={ + "delay_id": delay_id, + "user_localpart": user_localpart, + "is_processed": False, + }, + ) + except StoreError: + if txn.rowcount == 0: + raise NotFoundError("Delayed event not found") + else: + raise + + return self._get_next_delayed_event_send_ts_txn(txn) + + return await self.db_pool.runInteraction( + "cancel_delayed_event", cancel_delayed_event_txn + ) + + async def cancel_delayed_state_events( + self, + *, + room_id: str, + event_type: str, + state_key: str, + not_from_localpart: str, + ) -> Optional[Timestamp]: + """ + Cancels all matching delayed state events, i.e. remove them as long as they haven't been processed. + + Args: + room_id: The room ID to match against. + event_type: The event type to match against. + state_key: The state key to match against. + not_from_localpart: The localpart of a user whose delayed events to not cancel. + If set to the empty string, any users' delayed events may be cancelled. + + Returns: The send time of the next delayed event to be sent, if any. + """ + + def cancel_delayed_state_events_txn( + txn: LoggingTransaction, + ) -> Optional[Timestamp]: + txn.execute( + """ + DELETE FROM delayed_events + WHERE room_id = ? AND event_type = ? AND state_key = ? + AND user_localpart <> ? + AND NOT is_processed + """, + ( + room_id, + event_type, + state_key, + not_from_localpart, + ), + ) + return self._get_next_delayed_event_send_ts_txn(txn) + + return await self.db_pool.runInteraction( + "cancel_delayed_state_events", cancel_delayed_state_events_txn + ) + + async def delete_processed_delayed_event( + self, + delay_id: DelayID, + user_localpart: UserLocalpart, + ) -> None: + """ + Delete the matching delayed event, as long as it has been marked as processed. + + Throws: + StoreError: if there is no matching delayed event, or if it has not yet been processed. 
+ """ + return await self.db_pool.simple_delete_one( + table="delayed_events", + keyvalues={ + "delay_id": delay_id, + "user_localpart": user_localpart, + "is_processed": True, + }, + desc="delete_processed_delayed_event", + ) + + async def delete_processed_delayed_state_events( + self, + *, + room_id: str, + event_type: str, + state_key: str, + ) -> None: + """ + Delete the matching delayed state events that have been marked as processed. + """ + await self.db_pool.simple_delete( + table="delayed_events", + keyvalues={ + "room_id": room_id, + "event_type": event_type, + "state_key": state_key, + "is_processed": True, + }, + desc="delete_processed_delayed_state_events", + ) + + async def unprocess_delayed_events(self) -> None: + """ + Unmark all delayed events for processing. + """ + await self.db_pool.simple_update( + table="delayed_events", + keyvalues={"is_processed": True}, + updatevalues={"is_processed": False}, + desc="unprocess_delayed_events", + ) + + async def get_next_delayed_event_send_ts(self) -> Optional[Timestamp]: + """ + Returns the send time of the next delayed event to be sent, if any. + """ + return await self.db_pool.runInteraction( + "get_next_delayed_event_send_ts", + self._get_next_delayed_event_send_ts_txn, + db_autocommit=True, + ) + + def _get_next_delayed_event_send_ts_txn( + self, txn: LoggingTransaction + ) -> Optional[Timestamp]: + result = self.db_pool.simple_select_one_onecol_txn( + txn, + table="delayed_events", + keyvalues={"is_processed": False}, + retcol="MIN(send_ts)", + allow_none=True, + ) + return Timestamp(result) if result is not None else None + + +def _generate_delay_id() -> DelayID: + """Generates an opaque string, for use as a delay ID""" + + # We use the following format for delay IDs: + # syd_<random string> + # They are scoped to user localparts, so it is possible for + # the same ID to exist for multiple users. + + return DelayID(f"syd_{stringutils.random_string(20)}") diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 042d595ea0..d47833655d 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py
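In the deviceinbox hunks, apart from another assert rewrap, the only change is `{d for d, in txn}` becoming `{d for (d,) in txn}`. Both spellings unpack one-column cursor rows identically; the parenthesised form just makes the single-element tuple explicit:

    rows = [("dest1",), ("dest2",)]
    assert {d for d, in rows} == {d for (d,) in rows} == {"dest1", "dest2"}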
@@ -200,9 +200,9 @@ class DeviceInboxWorkerStore(SQLBaseStore): to_stream_id=to_stream_id, ) - assert ( - last_processed_stream_id == to_stream_id - ), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`" + assert last_processed_stream_id == to_stream_id, ( + "Expected _get_device_messages to process all to-device messages up to `to_stream_id`" + ) return user_id_device_id_to_messages @@ -1116,7 +1116,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): txn.execute(sql, (start, stop)) - destinations = {d for d, in txn} + destinations = {d for (d,) in txn} to_remove = set() for d in destinations: try: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 53024bddc3..6191f22cd6 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py
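The substantive devices.py change is that `get_device` gains `@cached()`, which obliges every write path touching the `devices` table (`store_device`, `update_device`, bulk deletes) to invalidate the cached entry, locally and over replication; the return type also narrows from `Dict` to `Mapping` so callers cannot mutate the shared cached value. A minimal sketch of the invariant being maintained (simplified, assumed signatures):

    @cached()
    async def get_device(self, user_id: str, device_id: str) -> Optional[Mapping[str, Any]]:
        ...  # SELECT from the `devices` table

    async def update_device(self, user_id: str, device_id: str, new_name: str) -> None:
        ...  # UPDATE the `devices` row
        # Every write must invalidate the now-stale cached read, on this
        # worker and (via the replication stream) on all others:
        await self.invalidate_cache_and_stream("get_device", (user_id, device_id))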
@@ -27,6 +27,7 @@ from typing import ( Dict, Iterable, List, + Literal, Mapping, Optional, Set, @@ -35,7 +36,6 @@ from typing import ( ) from canonicaljson import encode_canonical_json -from typing_extensions import Literal from synapse.api.constants import EduTypes from synapse.api.errors import Codes, StoreError @@ -282,9 +282,10 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): "count_devices_by_users", count_devices_by_users_txn, user_ids ) + @cached() async def get_device( self, user_id: str, device_id: str - ) -> Optional[Dict[str, Any]]: + ) -> Optional[Mapping[str, Any]]: """Retrieve a device. Only returns devices that are not marked as hidden. @@ -670,9 +671,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): result["keys"] = keys device_display_name = None - if ( - self.hs.config.federation.allow_device_name_lookup_over_federation - ): + if self.hs.config.federation.allow_device_name_lookup_over_federation: device_display_name = device.display_name if device_display_name: result["device_display_name"] = device_display_name @@ -917,7 +916,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): from_key, to_key, ) - return {u for u, in rows} + return {u for (u,) in rows} @cancellable async def get_users_whose_devices_changed( @@ -968,7 +967,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn.database_engine, "user_id", chunk ) txn.execute(sql % (clause,), [from_key, to_key] + args) - changes.update(user_id for user_id, in txn) + changes.update(user_id for (user_id,) in txn) return changes @@ -1093,7 +1092,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ), ) - results: Dict[str, Optional[str]] = {user_id: None for user_id in user_ids} + results: Dict[str, Optional[str]] = dict.fromkeys(user_ids) results.update(rows) return results @@ -1424,7 +1423,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): DELETE FROM device_lists_outbound_last_success WHERE destination = ? AND user_id = ? 
""" - txn.execute_batch(sql, ((row[0], row[1]) for row in rows)) + txn.execute_batch(sql, [(row[0], row[1]) for row in rows]) logger.info("Pruned %d device list outbound pokes", count) @@ -1520,7 +1519,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): args: List[Any], ) -> Set[str]: txn.execute(sql.format(clause=clause), args) - return {user_id for user_id, in txn} + return {user_id for (user_id,) in txn} changes = set() for chunk in batch_iter(changed_room_ids, 1000): @@ -1560,7 +1559,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): txn: LoggingTransaction, ) -> Set[str]: txn.execute(sql, (from_id, to_id)) - return {room_id for room_id, in txn} + return {room_id for (room_id,) in txn} return await self.db_pool.runInteraction( "get_all_device_list_changes", @@ -1819,6 +1818,8 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): }, desc="store_device", ) + await self.invalidate_cache_and_stream("get_device", (user_id, device_id)) + if not inserted: # if the device already exists, check if it's a real device, or # if the device ID is reserved by something else @@ -1884,6 +1885,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): values=device_ids, keyvalues={"user_id": user_id}, ) + self._invalidate_cache_and_stream_bulk( + txn, self.get_device, [(user_id, device_id) for device_id in device_ids] + ) for batch in batch_iter(device_ids, 100): await self.db_pool.runInteraction( @@ -1917,6 +1921,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): updatevalues=updates, desc="update_device", ) + await self.invalidate_cache_and_stream("get_device", (user_id, device_id)) async def update_remote_device_list_cache_entry( self, user_id: str, device_id: str, content: JsonDict, stream_id: str diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py
index 4d6a921ab2..904ae5cb58 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py
@@ -19,9 +19,18 @@ # # -from typing import TYPE_CHECKING, Dict, Iterable, List, Mapping, Optional, Tuple, cast - -from typing_extensions import Literal, TypedDict +from typing import ( + TYPE_CHECKING, + Dict, + Iterable, + List, + Literal, + Mapping, + Optional, + Tuple, + TypedDict, + cast, +) from synapse.api.errors import StoreError from synapse.logging.opentracing import log_kv, trace @@ -387,9 +396,7 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): is_verified, session_data FROM e2e_room_keys WHERE user_id = ? AND version = ? AND (%s) - """ % ( - " OR ".join(where_clauses) - ) + """ % (" OR ".join(where_clauses)) txn.execute(sql, params) @@ -512,19 +519,16 @@ class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): # it isn't there. raise StoreError(404, "No backup with that version exists") - row = cast( - Tuple[int, str, str, Optional[int]], - self.db_pool.simple_select_one_txn( - txn, - table="e2e_room_keys_versions", - keyvalues={ - "user_id": user_id, - "version": this_version, - "deleted": 0, - }, - retcols=("version", "algorithm", "auth_data", "etag"), - allow_none=False, - ), + row = self.db_pool.simple_select_one_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={ + "user_id": user_id, + "version": this_version, + "deleted": 0, + }, + retcols=("version", "algorithm", "auth_data", "etag"), + allow_none=False, ) return { "auth_data": db_to_json(row[2]), diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 9e6c9561ae..341e7014d6 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py
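Two behavioural changes land in end_to_end_keys.py: one-time keys are now claimed oldest-first (`ORDER BY ts_added_ms`, backed by a new background index), and `delete_old_otks_for_next_user_batch` is added so stale libolm-era OTKs can be pruned one batch of users at a time. A plausible driver loop for the latter, inferred from its `(users, rows)` return contract (an empty user list signals completion):

    async def prune_all_old_otks(store) -> int:
        after_user_id = ""
        total_deleted = 0
        while True:
            users, deleted = await store.delete_old_otks_for_next_user_batch(
                after_user_id, number_of_users=100
            )
            if not users:
                return total_deleted  # no users left beyond `after_user_id`
            total_deleted += deleted
            after_user_id = users[-1]  # batches come back ordered by user_id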
@@ -27,6 +27,7 @@ from typing import ( Dict, Iterable, List, + Literal, Mapping, Optional, Sequence, @@ -39,7 +40,6 @@ from typing import ( import attr from canonicaljson import encode_canonical_json -from typing_extensions import Literal from synapse.api.constants import DeviceKeyAlgorithms from synapse.appservice import ( @@ -99,6 +99,13 @@ class EndToEndKeyBackgroundStore(SQLBaseStore): unique=True, ) + self.db_pool.updates.register_background_index_update( + update_name="add_otk_ts_added_index", + index_name="e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx", + table="e2e_one_time_keys_json", + columns=("user_id", "device_id", "algorithm", "ts_added_ms"), + ) + class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore): def __init__( @@ -472,9 +479,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker signature_sql = """ SELECT user_id, key_id, target_device_id, signature FROM e2e_cross_signing_signatures WHERE %s - """ % ( - " OR ".join("(" + q + ")" for q in signature_query_clauses) - ) + """ % (" OR ".join("(" + q + ")" for q in signature_query_clauses)) txn.execute(signature_sql, signature_query_params) return cast( @@ -917,9 +922,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker FROM e2e_cross_signing_keys WHERE %(clause)s ORDER BY user_id, keytype, stream_id DESC - """ % { - "clause": clause - } + """ % {"clause": clause} else: # SQLite has special handling for bare columns when using # MIN/MAX with a `GROUP BY` clause where it picks the value from @@ -929,9 +932,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker FROM e2e_cross_signing_keys WHERE %(clause)s GROUP BY user_id, keytype - """ % { - "clause": clause - } + """ % {"clause": clause} txn.execute(sql, params) @@ -1128,7 +1129,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker """Take a list of one time keys out of the database. Args: - query_list: An iterable of tuples of (user ID, device ID, algorithm). + query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys). Returns: A tuple (results, missing) of: @@ -1316,9 +1317,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker OTK was found. """ + # Return the oldest keys from this device (based on `ts_added_ms`). + # Doing so means that keys are issued in the same order they were uploaded, + # which reduces the chances of a client expiring its copy of a (private) + # key while the public key is still on the server, waiting to be issued. sql = """ SELECT key_id, key_json FROM e2e_one_time_keys_json WHERE user_id = ? AND device_id = ? AND algorithm = ? + ORDER BY ts_added_ms LIMIT ? """ @@ -1360,13 +1366,22 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker A list of tuples (user_id, device_id, algorithm, key_id, key_json) for each OTK claimed. """ + # Find, delete, and return the oldest keys from each device (based on + # `ts_added_ms`). + # + # Doing so means that keys are issued in the same order they were uploaded, + # which reduces the chances of a client expiring its copy of a (private) + # key while the public key is still on the server, waiting to be issued. sql = """ WITH claims(user_id, device_id, algorithm, claim_count) AS ( VALUES ? 
), ranked_keys AS ( SELECT user_id, device_id, algorithm, key_id, claim_count, - ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r + ROW_NUMBER() OVER ( + PARTITION BY (user_id, device_id, algorithm) + ORDER BY ts_added_ms + ) AS r FROM e2e_one_time_keys_json JOIN claims USING (user_id, device_id, algorithm) ) @@ -1438,6 +1453,93 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker impl, ) + async def delete_old_otks_for_next_user_batch( + self, after_user_id: str, number_of_users: int + ) -> Tuple[List[str], int]: + """Deletes old OTKs belonging to the next batch of users + + Returns: + `(users, rows)`, where: + * `users` is the user IDs of the updated users. An empty list if we are done. + * `rows` is the number of deleted rows + """ + + def impl(txn: LoggingTransaction) -> Tuple[List[str], int]: + # Find a batch of users + txn.execute( + """ + SELECT DISTINCT(user_id) FROM e2e_one_time_keys_json + WHERE user_id > ? + ORDER BY user_id + LIMIT ? + """, + (after_user_id, number_of_users), + ) + users = [row[0] for row in txn.fetchall()] + if len(users) == 0: + return users, 0 + + # Delete any old OTKs belonging to those users. + # + # We only actually consider OTKs whose key ID is 6 characters long. These + # keys were likely made by libolm rather than Vodozemac; libolm only kept + # 100 private OTKs, so was far more vulnerable than Vodozemac to throwing + # away keys prematurely. + clause, args = make_in_list_sql_clause( + txn.database_engine, "user_id", users + ) + sql = f""" + DELETE FROM e2e_one_time_keys_json + WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6 + """ + args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000)) + txn.execute(sql, args) + + return users, txn.rowcount + + return await self.db_pool.runInteraction( + "delete_old_otks_for_next_user_batch", impl + ) + + async def allow_master_cross_signing_key_replacement_without_uia( + self, user_id: str, duration_ms: int + ) -> Optional[int]: + """Mark this user's latest master key as being replaceable without UIA. + + Said replacement will only be permitted for a short time after calling this + function. That time period is controlled by the duration argument. + + Returns: + None, if there is no such key. + Otherwise, the timestamp before which replacement is allowed without UIA. + """ + timestamp = self._clock.time_msec() + duration_ms + + def impl(txn: LoggingTransaction) -> Optional[int]: + txn.execute( + """ + UPDATE e2e_cross_signing_keys + SET updatable_without_uia_before_ms = ? + WHERE stream_id = ( + SELECT stream_id + FROM e2e_cross_signing_keys + WHERE user_id = ? AND keytype = 'master' + ORDER BY stream_id DESC + LIMIT 1 + ) + """, + (timestamp, user_id), + ) + if txn.rowcount == 0: + return None + + return timestamp + + return await self.db_pool.runInteraction( + "allow_master_cross_signing_key_replacement_without_uia", + impl, + ) + class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def __init__( @@ -1692,42 +1794,3 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): ], desc="add_e2e_signing_key", ) - - async def allow_master_cross_signing_key_replacement_without_uia( - self, user_id: str, duration_ms: int - ) -> Optional[int]: - """Mark this user's latest master key as being replaceable without UIA. - - Said replacement will only be permitted for a short time after calling this - function. That time period is controlled by the duration argument. - - Returns: - None, if there is no such key. 
- Otherwise, the timestamp before which replacement is allowed without UIA. - """ - timestamp = self._clock.time_msec() + duration_ms - - def impl(txn: LoggingTransaction) -> Optional[int]: - txn.execute( - """ - UPDATE e2e_cross_signing_keys - SET updatable_without_uia_before_ms = ? - WHERE stream_id = ( - SELECT stream_id - FROM e2e_cross_signing_keys - WHERE user_id = ? AND keytype = 'master' - ORDER BY stream_id DESC - LIMIT 1 - ) - """, - (timestamp, user_id), - ) - if txn.rowcount == 0: - return None - - return timestamp - - return await self.db_pool.runInteraction( - "allow_master_cross_signing_key_replacement_without_uia", - impl, - ) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 715846865b..46aa5902d8 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py
@@ -326,7 +326,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas """ rows = txn.execute_values(sql, chains.items()) - results.update(r for r, in rows) + results.update(r for (r,) in rows) else: # For SQLite we just fall back to doing a noddy for loop. sql = """ @@ -335,7 +335,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas """ for chain_id, max_no in chains.items(): txn.execute(sql, (chain_id, max_no)) - results.update(r for r, in txn) + results.update(r for (r,) in txn) return results @@ -645,7 +645,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas ] rows = txn.execute_values(sql, args) - result.update(r for r, in rows) + result.update(r for (r,) in rows) else: # For SQLite we just fall back to doing a noddy for loop. sql = """ @@ -654,7 +654,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas """ for chain_id, (min_no, max_no) in chain_to_gap.items(): txn.execute(sql, (chain_id, min_no, max_no)) - result.update(r for r, in txn) + result.update(r for (r,) in txn) return result @@ -1220,13 +1220,11 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas HAVING count(*) > ? ORDER BY count(*) DESC LIMIT ? - """ % ( - where_clause, - ) + """ % (where_clause,) query_args = list(itertools.chain(room_id_filter, [min_count, limit])) txn.execute(sql, query_args) - return [room_id for room_id, in txn] + return [room_id for (room_id,) in txn] return await self.db_pool.runInteraction( "get_rooms_with_many_extremities", _get_rooms_with_many_extremities_txn @@ -1358,7 +1356,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> List[str]: txn.execute(sql, (stream_ordering, room_id)) - return [event_id for event_id, in txn] + return [event_id for (event_id,) in txn] event_ids = await self.db_pool.runInteraction( "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 0ebf5b53d5..6fb4a6df8c 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py
@@ -1034,97 +1034,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas # one of the subqueries may have hit the limit. return notifs[:limit] - async def get_unread_push_actions_for_user_in_range_for_email( - self, - user_id: str, - min_stream_ordering: int, - max_stream_ordering: int, - limit: int = 20, - ) -> List[EmailPushAction]: - """Get a list of the most recent unread push actions for a given user, - within the given stream ordering range. Called by the emailpusher - - Args: - user_id: The user to fetch push actions for. - min_stream_ordering: The exclusive lower bound on the - stream ordering of event push actions to fetch. - max_stream_ordering: The inclusive upper bound on the - stream ordering of event push actions to fetch. - limit: The maximum number of rows to return. - Returns: - A list of dicts with the keys "event_id", "room_id", "stream_ordering", "actions", "received_ts". - The list will be ordered by descending received_ts. - The list will have between 0~limit entries. - """ - - def get_push_actions_txn( - txn: LoggingTransaction, - ) -> List[Tuple[str, str, str, int, str, bool, int]]: - sql = """ - SELECT ep.event_id, ep.room_id, ep.thread_id, ep.stream_ordering, - ep.actions, ep.highlight, e.received_ts - FROM event_push_actions AS ep - INNER JOIN events AS e USING (room_id, event_id) - WHERE - ep.user_id = ? - AND ep.stream_ordering > ? - AND ep.stream_ordering <= ? - AND ep.notif = 1 - ORDER BY ep.stream_ordering DESC LIMIT ? - """ - txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit)) - return cast(List[Tuple[str, str, str, int, str, bool, int]], txn.fetchall()) - - push_actions = await self.db_pool.runInteraction( - "get_unread_push_actions_for_user_in_range_email", get_push_actions_txn - ) - - room_ids = set() - thread_ids = [] - for ( - _, - room_id, - thread_id, - _, - _, - _, - _, - ) in push_actions: - room_ids.add(room_id) - thread_ids.append(thread_id) - - receipts_by_room = await self.db_pool.runInteraction( - "get_unread_push_actions_for_user_in_range_email_receipts", - self._get_receipts_for_room_and_threads_txn, - user_id=user_id, - room_ids=room_ids, - thread_ids=thread_ids, - ) - - # Make a list of dicts from the two sets of results. - notifs = [ - EmailPushAction( - event_id=event_id, - room_id=room_id, - stream_ordering=stream_ordering, - actions=_deserialize_action(actions, highlight), - received_ts=received_ts, - ) - for event_id, room_id, thread_id, stream_ordering, actions, highlight, received_ts in push_actions - if receipts_by_room.get(room_id, MISSING_ROOM_RECEIPT).is_unread( - thread_id, stream_ordering - ) - ] - - # Now sort it so it's ordered correctly, since currently it will - # contain results from the first query, correctly ordered, followed - # by results from the second query, but we want them all ordered - # by received_ts (most recent first) - notifs.sort(key=lambda r: -(r.received_ts or 0)) - - # Now return the first `limit` - return notifs[:limit] - async def get_if_maybe_push_in_range_for_user( self, user_id: str, min_stream_ordering: int ) -> bool: @@ -1860,9 +1769,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas AND epa.notif = 1 ORDER BY epa.stream_ordering DESC LIMIT ? - """ % ( - before_clause, - ) + """ % (before_clause,) txn.execute(sql, args) return cast( List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall() diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1f7acdb859..b7cc0433e7 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py
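The events.py hunks introduce `SLIDING_SYNC_RELEVANT_STATE_SET` and move the gathering of sliding-sync table rows out of the persist transaction into `_calculate_sliding_sync_table_changes`. The core trick is fetching only the four relevant state entries with a `StateFilter`; a condensed sketch using names from the hunks below (the surrounding calls are illustrative):

    from synapse.types.state import StateFilter

    # Fetch only the state entries the sliding-sync tables care about.
    state_filter = StateFilter.from_types(SLIDING_SYNC_RELEVANT_STATE_SET)
    current_state_ids = await store.get_partial_filtered_current_state_ids(
        room_id, state_filter=state_filter
    )
    # e.g. {("m.room.create", ""): "$create", ("m.room.name", ""): "$name"};
    # the full events then yield the `room_type`, `is_encrypted`, `room_name`
    # and `tombstone_successor_room_id` columns.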
@@ -32,8 +32,10 @@ from typing import ( Iterable, List, Optional, + Sequence, Set, Tuple, + TypedDict, cast, ) @@ -41,17 +43,24 @@ import attr from prometheus_client import Counter import synapse.metrics -from synapse.api.constants import EventContentFields, EventTypes, RelationTypes +from synapse.api.constants import ( + EventContentFields, + EventTypes, + Membership, + RelationTypes, +) from synapse.api.errors import PartialStateConflictError from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase, relation_from_event +from synapse.events import EventBase, StrippedStateEvent, relation_from_event from synapse.events.snapshot import EventContext +from synapse.events.utils import parse_stripped_state_event from synapse.logging.opentracing import trace from synapse.storage._base import db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, LoggingTransaction, + make_tuple_in_list_sql_clause, ) from synapse.storage.databases.main.event_federation import EventFederationStore from synapse.storage.databases.main.events_worker import EventCacheEntry @@ -59,7 +68,15 @@ from synapse.storage.databases.main.search import SearchEntry from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator -from synapse.types import JsonDict, StateMap, StrCollection, get_domain_from_id +from synapse.types import ( + JsonDict, + MutableStateMap, + StateMap, + StrCollection, + get_domain_from_id, +) +from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES +from synapse.types.state import StateFilter from synapse.util import json_encoder from synapse.util.iterutils import batch_iter, sorted_topologically from synapse.util.stringutils import non_null_str_or_none @@ -78,6 +95,19 @@ event_counter = Counter( ["type", "origin_type", "origin_entity"], ) +# State event type/key pairs that we need to gather to fill in the +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. +SLIDING_SYNC_RELEVANT_STATE_SET = ( + # So we can fill in the `room_type` column + (EventTypes.Create, ""), + # So we can fill in the `is_encrypted` column + (EventTypes.RoomEncryption, ""), + # So we can fill in the `room_name` column + (EventTypes.Name, ""), + # So we can fill in the `tombstone_successor_room_id` column + (EventTypes.Tombstone, ""), +) + @attr.s(slots=True, auto_attribs=True) class DeltaState: @@ -99,6 +129,80 @@ class DeltaState: return not self.to_delete and not self.to_insert and not self.no_longer_in_room +# We want `total=False` because we want to allow values to be unset. +class SlidingSyncStateInsertValues(TypedDict, total=False): + """ + Insert values relevant for the `sliding_sync_joined_rooms` and + `sliding_sync_membership_snapshots` database tables. 
+ """ + + room_type: Optional[str] + is_encrypted: Optional[bool] + room_name: Optional[str] + tombstone_successor_room_id: Optional[str] + + +class SlidingSyncMembershipSnapshotSharedInsertValues( + SlidingSyncStateInsertValues, total=False +): + """ + Insert values for `sliding_sync_membership_snapshots` that we can share across + multiple memberships + """ + + has_known_state: Optional[bool] + + +@attr.s(slots=True, auto_attribs=True) +class SlidingSyncMembershipInfo: + """ + Values unique to each membership + """ + + user_id: str + sender: str + membership_event_id: str + membership: str + + +@attr.s(slots=True, auto_attribs=True) +class SlidingSyncMembershipInfoWithEventPos(SlidingSyncMembershipInfo): + """ + SlidingSyncMembershipInfo + `stream_ordering`/`instance_name` of the membership + event + """ + + membership_event_stream_ordering: int + membership_event_instance_name: str + + +@attr.s(slots=True, auto_attribs=True) +class SlidingSyncTableChanges: + room_id: str + # If the row doesn't exist in the `sliding_sync_joined_rooms` table, we need to + # fully-insert it which means we also need to include a `bump_stamp` value to use + # for the row. This should only be populated when we're trying to fully-insert a + # row. + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + joined_room_bump_stamp_to_fully_insert: Optional[int] + # Values to upsert into `sliding_sync_joined_rooms` + joined_room_updates: SlidingSyncStateInsertValues + + # Shared values to upsert into `sliding_sync_membership_snapshots` for each + # `to_insert_membership_snapshots` + membership_snapshot_shared_insert_values: ( + SlidingSyncMembershipSnapshotSharedInsertValues + ) + # List of membership to insert into `sliding_sync_membership_snapshots` + to_insert_membership_snapshots: List[SlidingSyncMembershipInfo] + # List of user_id to delete from `sliding_sync_membership_snapshots` + to_delete_membership_snapshots: List[str] + + @attr.s(slots=True, auto_attribs=True) class NewEventChainLinks: """Information about new auth chain links that need to be added to the DB. @@ -142,9 +246,9 @@ class PersistEventsStore: self.is_mine_id = hs.is_mine_id # This should only exist on instances that are configured to write - assert ( - hs.get_instance_name() in hs.config.worker.writers.events - ), "Can only instantiate EventsStore on master" + assert hs.get_instance_name() in hs.config.worker.writers.events, ( + "Can only instantiate EventsStore on master" + ) # Since we have been configured to write, we ought to have id generators, # rather than id trackers. @@ -223,9 +327,24 @@ class PersistEventsStore: async with stream_ordering_manager as stream_orderings: for (event, _), stream in zip(events_and_contexts, stream_orderings): + # XXX: We can't rely on `stream_ordering`/`instance_name` being correct + # at this point. We could be working with events that were previously + # persisted as an `outlier` with one `stream_ordering` but are now being + # persisted again and de-outliered and are being assigned a different + # `stream_ordering` here that won't end up being used. + # `_update_outliers_txn()` will fix this discrepancy (always use the + # `stream_ordering` from the first time it was persisted). 
event.internal_metadata.stream_ordering = stream event.internal_metadata.instance_name = self._instance_name + sliding_sync_table_changes = None + if state_delta_for_room is not None: + sliding_sync_table_changes = ( + await self._calculate_sliding_sync_table_changes( + room_id, events_and_contexts, state_delta_for_room + ) + ) + await self.db_pool.runInteraction( "persist_events", self._persist_events_txn, @@ -235,6 +354,7 @@ class PersistEventsStore: state_delta_for_room=state_delta_for_room, new_forward_extremities=new_forward_extremities, new_event_links=new_event_links, + sliding_sync_table_changes=sliding_sync_table_changes, ) persist_event_counter.inc(len(events_and_contexts)) @@ -261,6 +381,301 @@ class PersistEventsStore: (room_id,), frozenset(new_forward_extremities) ) + async def _calculate_sliding_sync_table_changes( + self, + room_id: str, + events_and_contexts: Sequence[Tuple[EventBase, EventContext]], + delta_state: DeltaState, + ) -> SlidingSyncTableChanges: + """ + Calculate the changes to the `sliding_sync_membership_snapshots` and + `sliding_sync_joined_rooms` tables given the deltas that are going to be used to + update the `current_state_events` table. + + Just a bunch of pre-processing so we don't need to spend time in the + transaction itself gathering all of this info. It's also easier to deal with + redactions outside of a transaction. + + Args: + room_id: The room ID currently being processed. + events_and_contexts: List of tuples of (event, context) being persisted. + This is completely optional (you can pass an empty list) and will just + save us from fetching the events from the database if we already have + them. We assume the list is sorted ascending by `stream_ordering`. We + don't care about the sort when the events are backfilled (with negative + `stream_ordering`). + delta_state: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. + + Returns: + SlidingSyncTableChanges + """ + to_insert = delta_state.to_insert + to_delete = delta_state.to_delete + + # If no state is changing, we don't need to do anything. This can happen when a + # partial-stated room is re-syncing the current state. 
+ if not to_insert and not to_delete: + return SlidingSyncTableChanges( + room_id=room_id, + joined_room_bump_stamp_to_fully_insert=None, + joined_room_updates={}, + membership_snapshot_shared_insert_values={}, + to_insert_membership_snapshots=[], + to_delete_membership_snapshots=[], + ) + + event_map = {event.event_id: event for event, _ in events_and_contexts} + + # Handle gathering info for the `sliding_sync_membership_snapshots` table + # + # This would only happen if someone was state reset out of the room + user_ids_to_delete_membership_snapshots = [ + state_key + for event_type, state_key in to_delete + if event_type == EventTypes.Member and self.is_mine_id(state_key) + ] + + membership_snapshot_shared_insert_values: SlidingSyncMembershipSnapshotSharedInsertValues = {} + membership_infos_to_insert_membership_snapshots: List[ + SlidingSyncMembershipInfo + ] = [] + if to_insert: + membership_event_id_to_user_id_map: Dict[str, str] = {} + for state_key, event_id in to_insert.items(): + if state_key[0] == EventTypes.Member and self.is_mine_id(state_key[1]): + membership_event_id_to_user_id_map[event_id] = state_key[1] + + membership_event_map: Dict[str, EventBase] = {} + # In normal event persist scenarios, we should be able to find the + # membership events in the `events_and_contexts` given to us but it's + # possible a state reset happened which added us to the room without a + # corresponding new membership event (reset back to a previous membership). + missing_membership_event_ids: Set[str] = set() + for membership_event_id in membership_event_id_to_user_id_map.keys(): + membership_event = event_map.get(membership_event_id) + if membership_event: + membership_event_map[membership_event_id] = membership_event + else: + missing_membership_event_ids.add(membership_event_id) + + # Otherwise, we need to find a couple events that we were reset to. + if missing_membership_event_ids: + remaining_events = await self.store.get_events( + missing_membership_event_ids + ) + # There shouldn't be any missing events + assert remaining_events.keys() == missing_membership_event_ids, ( + missing_membership_event_ids.difference(remaining_events.keys()) + ) + membership_event_map.update(remaining_events) + + for ( + membership_event_id, + user_id, + ) in membership_event_id_to_user_id_map.items(): + membership_infos_to_insert_membership_snapshots.append( + # XXX: We don't use `SlidingSyncMembershipInfoWithEventPos` here + # because we're sourcing the event from `events_and_contexts`, we + # can't rely on `stream_ordering`/`instance_name` being correct at + # this point. We could be working with events that were previously + # persisted as an `outlier` with one `stream_ordering` but are now + # being persisted again and de-outliered and assigned a different + # `stream_ordering` that won't end up being used. Since we call + # `_calculate_sliding_sync_table_changes()` before + # `_update_outliers_txn()` which fixes this discrepancy (always use + # the `stream_ordering` from the first time it was persisted), we're + # working with an unreliable `stream_ordering` value that will + # possibly be unused and not make it into the `events` table. 
+ SlidingSyncMembershipInfo( + user_id=user_id, + sender=membership_event_map[membership_event_id].sender, + membership_event_id=membership_event_id, + membership=membership_event_map[membership_event_id].membership, + ) + ) + + if membership_infos_to_insert_membership_snapshots: + current_state_ids_map: MutableStateMap[str] = dict( + await self.store.get_partial_filtered_current_state_ids( + room_id, + state_filter=StateFilter.from_types( + SLIDING_SYNC_RELEVANT_STATE_SET + ), + ) + ) + # Since we fetched the current state before we took `to_insert`/`to_delete` + # into account, we need to do a couple fixups. + # + # Update the current_state_map with what we have `to_delete` + for state_key in to_delete: + current_state_ids_map.pop(state_key, None) + # Update the current_state_map with what we have `to_insert` + for state_key, event_id in to_insert.items(): + if state_key in SLIDING_SYNC_RELEVANT_STATE_SET: + current_state_ids_map[state_key] = event_id + + current_state_map: MutableStateMap[EventBase] = {} + # In normal event persist scenarios, we probably won't be able to find + # these state events in `events_and_contexts` since we don't generally + # batch up local membership changes with other events, but it can + # happen. + missing_state_event_ids: Set[str] = set() + for state_key, event_id in current_state_ids_map.items(): + event = event_map.get(event_id) + if event: + current_state_map[state_key] = event + else: + missing_state_event_ids.add(event_id) + + # Otherwise, we need to find a couple events + if missing_state_event_ids: + remaining_events = await self.store.get_events( + missing_state_event_ids + ) + # There shouldn't be any missing events + assert remaining_events.keys() == missing_state_event_ids, ( + missing_state_event_ids.difference(remaining_events.keys()) + ) + for event in remaining_events.values(): + current_state_map[(event.type, event.state_key)] = event + + if current_state_map: + state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + membership_snapshot_shared_insert_values.update(state_insert_values) + # We have current state to work from + membership_snapshot_shared_insert_values["has_known_state"] = True + else: + # We don't have any `current_state_events` anymore (previously + # cleared out because of `no_longer_in_room`). This can happen if + # one user is joined and another is invited (some non-join + # membership). If the joined user leaves, we are `no_longer_in_room` + # and `current_state_events` is cleared out. When the invited user + # rejects the invite (leaves the room), we will end up here. + # + # In these cases, we should inherit the meta data from the previous + # snapshot so we shouldn't update any of the state values. When + # using sliding sync filters, this will prevent the room from + # disappearing/appearing just because you left the room. + # + # Ideally, we could additionally assert that we're only here for + # valid non-join membership transitions. + assert delta_state.no_longer_in_room + + # Handle gathering info for the `sliding_sync_joined_rooms` table + # + # We only deal with + # updating the state related columns. 
The + # `event_stream_ordering`/`bump_stamp` are updated elsewhere in the event + # persisting stack (see + # `_update_sliding_sync_tables_with_new_persisted_events_txn()`) + # + joined_room_updates: SlidingSyncStateInsertValues = {} + bump_stamp_to_fully_insert: Optional[int] = None + if not delta_state.no_longer_in_room: + current_state_ids_map = {} + + # Always fully-insert rows if they don't already exist in the + # `sliding_sync_joined_rooms` table. This way we can rely on a row if it + # exists in the table. + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + existing_row_in_table = await self.store.db_pool.simple_select_one_onecol( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + retcol="room_id", + allow_none=True, + ) + if not existing_row_in_table: + most_recent_bump_event_pos_results = ( + await self.store.get_last_event_pos_in_room( + room_id, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + if most_recent_bump_event_pos_results is not None: + _, new_bump_event_pos = most_recent_bump_event_pos_results + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead just leave it as `None` in the table and we will use their + # membership event position as the bump event position in the + # Sliding Sync API. + if new_bump_event_pos.stream > 0: + bump_stamp_to_fully_insert = new_bump_event_pos.stream + + current_state_ids_map = dict( + await self.store.get_partial_filtered_current_state_ids( + room_id, + state_filter=StateFilter.from_types( + SLIDING_SYNC_RELEVANT_STATE_SET + ), + ) + ) + + # Look through the items we're going to insert into the current state to see + # if there is anything that we care about and should also update in the + # `sliding_sync_joined_rooms` table. + for state_key, event_id in to_insert.items(): + if state_key in SLIDING_SYNC_RELEVANT_STATE_SET: + current_state_ids_map[state_key] = event_id + + # Get the full event objects for the current state events + # + # In normal event persist scenarios, we should be able to find the state + # events in the `events_and_contexts` given to us but it's possible a state + # reset happened that reset us back to a previous state. + current_state_map = {} + missing_event_ids: Set[str] = set() + for state_key, event_id in current_state_ids_map.items(): + event = event_map.get(event_id) + if event: + current_state_map[state_key] = event + else: + missing_event_ids.add(event_id) + + # Otherwise, we need to find a couple events that we were reset to. 
+ if missing_event_ids: + remaining_events = await self.store.get_events(missing_event_ids) + # There shouldn't be any missing events + assert remaining_events.keys() == missing_event_ids, ( + missing_event_ids.difference(remaining_events.keys()) + ) + for event in remaining_events.values(): + current_state_map[(event.type, event.state_key)] = event + + joined_room_updates = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + ) + + # If something is being deleted from the state, we need to clear it out + for state_key in to_delete: + if state_key == (EventTypes.Create, ""): + joined_room_updates["room_type"] = None + elif state_key == (EventTypes.RoomEncryption, ""): + joined_room_updates["is_encrypted"] = False + elif state_key == (EventTypes.Name, ""): + joined_room_updates["room_name"] = None + + return SlidingSyncTableChanges( + room_id=room_id, + # For `sliding_sync_joined_rooms` + joined_room_bump_stamp_to_fully_insert=bump_stamp_to_fully_insert, + joined_room_updates=joined_room_updates, + # For `sliding_sync_membership_snapshots` + membership_snapshot_shared_insert_values=membership_snapshot_shared_insert_values, + to_insert_membership_snapshots=membership_infos_to_insert_membership_snapshots, + to_delete_membership_snapshots=user_ids_to_delete_membership_snapshots, + ) + async def calculate_chain_cover_index_for_events( self, room_id: str, events: Collection[EventBase] ) -> Dict[str, NewEventChainLinks]: @@ -315,7 +730,7 @@ class PersistEventsStore: keyvalues={}, retcols=("event_id",), ) - already_persisted_events = {event_id for event_id, in rows} + already_persisted_events = {event_id for (event_id,) in rows} state_events = [ event for event in state_events @@ -458,6 +873,7 @@ class PersistEventsStore: state_delta_for_room: Optional[DeltaState], new_forward_extremities: Optional[Set[str]], new_event_links: Dict[str, NewEventChainLinks], + sliding_sync_table_changes: Optional[SlidingSyncTableChanges], ) -> None: """Insert some number of room events into the necessary database tables. @@ -478,9 +894,14 @@ class PersistEventsStore: delete_existing True to purge existing table rows for the events from the database. This is useful when retrying due to IntegrityError. - state_delta_for_room: The current-state delta for the room. + state_delta_for_room: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. new_forward_extremities: The new forward extremities for the room: a set of the event ids which are the forward extremities. + sliding_sync_table_changes: Changes to the + `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables + derived from the given `delta_state` (see + `_calculate_sliding_sync_table_changes(...)`) Raises: PartialStateConflictError: if attempting to persist a partial state event in @@ -590,10 +1011,22 @@ class PersistEventsStore: # room_memberships, where applicable. # NB: This function invalidates all state related caches if state_delta_for_room: + # If the state delta exists, the sliding sync table changes should also exist + assert sliding_sync_table_changes is not None + self._update_current_state_txn( - txn, room_id, state_delta_for_room, min_stream_order + txn, + room_id, + state_delta_for_room, + min_stream_order, + sliding_sync_table_changes, ) + # We only update the sliding sync tables for non-backfilled events. 
+ self._update_sliding_sync_tables_with_new_persisted_events_txn( + txn, room_id, events_and_contexts + ) + def _persist_event_auth_chain_txn( self, txn: LoggingTransaction, @@ -1128,8 +1561,20 @@ class PersistEventsStore: self, room_id: str, state_delta: DeltaState, + sliding_sync_table_changes: SlidingSyncTableChanges, ) -> None: - """Update the current state stored in the datatabase for the given room""" + """ + Update the current state stored in the database for the given room + + Args: + room_id + state_delta: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. + sliding_sync_table_changes: Changes to the + `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables + derived from the given `delta_state` (see + `_calculate_sliding_sync_table_changes(...)`) + """ if state_delta.is_noop(): return @@ -1141,6 +1586,7 @@ class PersistEventsStore: room_id, delta_state=state_delta, stream_id=stream_ordering, + sliding_sync_table_changes=sliding_sync_table_changes, ) def _update_current_state_txn( @@ -1149,16 +1595,40 @@ class PersistEventsStore: room_id: str, delta_state: DeltaState, stream_id: int, + sliding_sync_table_changes: SlidingSyncTableChanges, ) -> None: + """ + Handles updating tables that track the current state of a room. + + Args: + txn + room_id + delta_state: Deltas that are going to be used to update the + `current_state_events` table. Changes to the current state of the room. + stream_id: This is expected to be the minimum `stream_ordering` for the + batch of events that we are persisting, which means we do not end up in a + situation where workers see events before the `current_state_delta` updates. + FIXME: However, this function also gets called with the next upcoming + `stream_ordering` when we re-sync the state of a partial stated room (see + `update_current_state(...)`) which may be "correct" but it would be good to + nail down what exactly is the expected value here. + sliding_sync_table_changes: Changes to the + `sliding_sync_membership_snapshots` and `sliding_sync_joined_rooms` tables + derived from the given `delta_state` (see + `_calculate_sliding_sync_table_changes(...)`) + """ to_delete = delta_state.to_delete to_insert = delta_state.to_insert + # Sanity check we're processing the same thing + assert room_id == sliding_sync_table_changes.room_id + # Figure out the changes of membership to invalidate the # `get_rooms_for_user` cache. # We find out which membership events we may have deleted # and which we have added, then we invalidate the caches for all # those users. - members_changed = { + members_to_cache_bust = { state_key for ev_type, state_key in itertools.chain(to_delete, to_insert) if ev_type == EventTypes.Member @@ -1182,16 +1652,22 @@ class PersistEventsStore: """ txn.execute(sql, (stream_id, self._instance_name, room_id)) + # Grab the list of users before we clear out the current state + users_in_room = self.store.get_users_in_room_txn(txn, room_id) # We also want to invalidate the membership caches for users # that were in the room. - users_in_room = self.store.get_users_in_room_txn(txn, room_id) - members_changed.update(users_in_room) + members_to_cache_bust.update(users_in_room) self.db_pool.simple_delete_txn( txn, table="current_state_events", keyvalues={"room_id": room_id}, ) + self.db_pool.simple_delete_txn( + txn, + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + ) else: # We're still in the room, so we update the current state as normal. 
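The hunks that follow build the `sliding_sync_joined_rooms` write dynamically from `joined_room_updates` and lean on `ON CONFLICT (room_id) DO UPDATE` so that only the state-derived columns are overwritten, while `event_stream_ordering`/`bump_stamp` keep their first-inserted values. A minimal sketch of that upsert pattern, against a toy sqlite3 schema rather than Synapse's real schema or database layer (the table and column names are copied from the diff; everything else is illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """
    CREATE TABLE sliding_sync_joined_rooms (
        room_id TEXT PRIMARY KEY,
        event_stream_ordering INTEGER NOT NULL,
        bump_stamp INTEGER,
        room_type TEXT,
        is_encrypted INTEGER,
        room_name TEXT
    )
    """
)


def upsert_joined_room(room_id, stream_ordering, bump_stamp, updates):
    # Build the column list from the update map, mirroring the dynamic
    # f-string SQL in the hunk below. `updates` must be non-empty, matching
    # the `if sliding_sync_table_changes.joined_room_updates:` guard.
    keys = list(updates)
    conn.execute(
        f"""
        INSERT INTO sliding_sync_joined_rooms
            (room_id, event_stream_ordering, bump_stamp, {", ".join(keys)})
        VALUES (?, ?, ?, {", ".join("?" for _ in keys)})
        ON CONFLICT (room_id)
        DO UPDATE SET {", ".join(f"{k} = EXCLUDED.{k}" for k in keys)}
        """,
        [room_id, stream_ordering, bump_stamp, *updates.values()],
    )


upsert_joined_room("!r:example.org", 42, 7, {"room_name": "Lounge", "is_encrypted": 0})
upsert_joined_room("!r:example.org", 99, 8, {"room_name": "New name"})
# Prints (42, 7, 'New name'): the conflict update only touched `room_name`,
# leaving the ordering columns to be advanced elsewhere, as described above.
print(
    conn.execute(
        "SELECT event_stream_ordering, bump_stamp, room_name"
        " FROM sliding_sync_joined_rooms"
    ).fetchone()
)
```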
@@ -1216,7 +1692,7 @@ class PersistEventsStore: """ txn.execute_batch( sql, - ( + [ ( stream_id, self._instance_name, state_key, ) for etype, state_key in itertools.chain(to_delete, to_insert) - ), + ], ) # Now we actually update the current_state_events table txn.execute_batch( "DELETE FROM current_state_events" " WHERE room_id = ? AND type = ? AND state_key = ?", - ( + [ (room_id, etype, state_key) for etype, state_key in itertools.chain(to_delete, to_insert) - ), + ], ) # We include the membership in the current state table, hence we do @@ -1260,6 +1736,63 @@ class PersistEventsStore: ], ) + # Handle updating the `sliding_sync_joined_rooms` table. We only deal with + # updating the state related columns. The + # `event_stream_ordering`/`bump_stamp` are updated elsewhere in the event + # persisting stack (see + # `_update_sliding_sync_tables_with_new_persisted_events_txn()`) + # + # We only need to update when one of the relevant state values has changed + if sliding_sync_table_changes.joined_room_updates: + sliding_sync_updates_keys = ( + sliding_sync_table_changes.joined_room_updates.keys() + ) + sliding_sync_updates_values = ( + sliding_sync_table_changes.joined_room_updates.values() + ) + + args: List[Any] = [ + room_id, + room_id, + sliding_sync_table_changes.joined_room_bump_stamp_to_fully_insert, + ] + args.extend(iter(sliding_sync_updates_values)) + + # XXX: We use a sub-query for `stream_ordering` because it's unreliable to + # pre-calculate from `events_and_contexts` at the time when + # `_calculate_sliding_sync_table_changes()` is run. We could be working + # with events that were previously persisted as an `outlier` with one + # `stream_ordering` but are now being persisted again and de-outliered + # and assigned a different `stream_ordering`. Since we call + # `_calculate_sliding_sync_table_changes()` before + # `_update_outliers_txn()` which fixes this discrepancy (always use the + # `stream_ordering` from the first time it was persisted), we're working + # with an unreliable `stream_ordering` value that will possibly be + # unused and not make it into the `events` table. + # + # We don't update `event_stream_ordering` `ON CONFLICT` because it's + # simpler and we can just rely on + # `_update_sliding_sync_tables_with_new_persisted_events_txn()` to do + # the right thing (same for `bump_stamp`). The only reason we're + # inserting `event_stream_ordering` here is because the column has a + # `NOT NULL` constraint and we need some answer. + txn.execute( + f""" + INSERT INTO sliding_sync_joined_rooms + (room_id, event_stream_ordering, bump_stamp, {", ".join(sliding_sync_updates_keys)}) + VALUES ( + ?, + (SELECT stream_ordering FROM events WHERE room_id = ? ORDER BY stream_ordering DESC LIMIT 1), + ?, + {", ".join("?" for _ in sliding_sync_updates_values)} + ) + ON CONFLICT (room_id) + DO UPDATE SET + {", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_updates_keys)} + """, + args, + ) + # We now update `local_current_membership`. We do this regardless # of whether we're still in the room or not to handle the case where # e.g. we just got banned (where we need to record that fact here). @@ -1272,11 +1805,11 @@ class PersistEventsStore: txn.execute_batch( "DELETE FROM local_current_membership" " WHERE room_id = ? 
AND user_id = ?", - ( + [ (room_id, state_key) for etype, state_key in itertools.chain(to_delete, to_insert) if etype == EventTypes.Member and self.is_mine_id(state_key) - ), + ], ) if to_insert: @@ -1296,20 +1829,422 @@ class PersistEventsStore: ], ) + # Handle updating the `sliding_sync_membership_snapshots` table + # + # This would only happen if someone was state reset out of the room + if sliding_sync_table_changes.to_delete_membership_snapshots: + self.db_pool.simple_delete_many_txn( + txn, + table="sliding_sync_membership_snapshots", + column="user_id", + values=sliding_sync_table_changes.to_delete_membership_snapshots, + keyvalues={"room_id": room_id}, + ) + + # We do this regardless of whether the server is `no_longer_in_room` or not + # because we still want a row if a local user was just left/kicked or got banned + # from the room. + if sliding_sync_table_changes.to_insert_membership_snapshots: + # Update the `sliding_sync_membership_snapshots` table + # + sliding_sync_snapshot_keys = sliding_sync_table_changes.membership_snapshot_shared_insert_values.keys() + sliding_sync_snapshot_values = sliding_sync_table_changes.membership_snapshot_shared_insert_values.values() + # We need to insert/update regardless of whether we have + # `sliding_sync_snapshot_keys` because there are other fields in the `ON + # CONFLICT` upsert to run (see inherit case (explained in + # `_calculate_sliding_sync_table_changes()`) for more context when this + # happens). + # + # XXX: We use a sub-query for `stream_ordering` because it's unreliable to + # pre-calculate from `events_and_contexts` at the time when + # `_calculate_sliding_sync_table_changes()` is ran. We could be working with + # events that were previously persisted as an `outlier` with one + # `stream_ordering` but are now being persisted again and de-outliered and + # assigned a different `stream_ordering` that won't end up being used. Since + # we call `_calculate_sliding_sync_table_changes()` before + # `_update_outliers_txn()` which fixes this discrepancy (always use the + # `stream_ordering` from the first time it was persisted), we're working + # with an unreliable `stream_ordering` value that will possibly be unused + # and not make it into the `events` table. + txn.execute_batch( + f""" + INSERT INTO sliding_sync_membership_snapshots + (room_id, user_id, sender, membership_event_id, membership, forgotten, event_stream_ordering, event_instance_name + {("," + ", ".join(sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""}) + VALUES ( + ?, ?, ?, ?, ?, ?, + (SELECT stream_ordering FROM events WHERE event_id = ?), + (SELECT COALESCE(instance_name, 'master') FROM events WHERE event_id = ?) + {("," + ", ".join("?" 
for _ in sliding_sync_snapshot_values)) if sliding_sync_snapshot_values else ""} + ) + ON CONFLICT (room_id, user_id) + DO UPDATE SET + sender = EXCLUDED.sender, + membership_event_id = EXCLUDED.membership_event_id, + membership = EXCLUDED.membership, + forgotten = EXCLUDED.forgotten, + event_stream_ordering = EXCLUDED.event_stream_ordering + {("," + ", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""} + """, + [ + [ + room_id, + membership_info.user_id, + membership_info.sender, + membership_info.membership_event_id, + membership_info.membership, + # Since this is a new membership, it isn't forgotten anymore (which + # matches how Synapse currently thinks about the forgotten status) + 0, + # XXX: We do not use `membership_info.membership_event_stream_ordering` here + # because it is an unreliable value. See XXX note above. + membership_info.membership_event_id, + # XXX: We do not use `membership_info.membership_event_instance_name` here + # because it is an unreliable value. See XXX note above. + membership_info.membership_event_id, + ] + + list(sliding_sync_snapshot_values) + for membership_info in sliding_sync_table_changes.to_insert_membership_snapshots + ], + ) + txn.call_after( self.store._curr_state_delta_stream_cache.entity_has_changed, room_id, stream_id, ) + for user_id in members_to_cache_bust: + txn.call_after( + self.store._membership_stream_cache.entity_has_changed, + user_id, + stream_id, + ) + # Invalidate the various caches - self.store._invalidate_state_caches_and_stream(txn, room_id, members_changed) + self.store._invalidate_state_caches_and_stream( + txn, room_id, members_to_cache_bust + ) # Check if any of the remote membership changes requires us to # unsubscribe from their device lists. self.store.handle_potentially_left_users_txn( - txn, {m for m in members_changed if not self.hs.is_mine_id(m)} + txn, {m for m in members_to_cache_bust if not self.hs.is_mine_id(m)} + ) + + @classmethod + def _get_relevant_sliding_sync_current_state_event_ids_txn( + cls, txn: LoggingTransaction, room_id: str + ) -> MutableStateMap[str]: + """ + Fetch the current state event IDs for the relevant (to the + `sliding_sync_joined_rooms` table) state types for the given room. + + Returns: + A StateMap of the event IDs necessary to fetch the relevant state values + needed to insert into the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + """ + # Fetch the current state event IDs from the database + ( + event_type_and_state_key_in_list_clause, + event_type_and_state_key_args, + ) = make_tuple_in_list_sql_clause( + txn.database_engine, + ("type", "state_key"), + SLIDING_SYNC_RELEVANT_STATE_SET, + ) + txn.execute( + f""" + SELECT c.event_id, c.type, c.state_key + FROM current_state_events AS c + WHERE + c.room_id = ? 
+ AND {event_type_and_state_key_in_list_clause} + """, + [room_id] + event_type_and_state_key_args, ) + current_state_map: MutableStateMap[str] = { + (event_type, state_key): event_id for event_id, event_type, state_key in txn + } + + return current_state_map + + @classmethod + def _get_sliding_sync_insert_values_from_state_map( + cls, state_map: StateMap[EventBase] + ) -> SlidingSyncStateInsertValues: + """ + Extract the relevant state values from the `state_map` needed to insert into the + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + + Returns: + Map from column names (`room_type`, `is_encrypted`, `room_name`) to relevant + state values needed to insert into + the `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables. + """ + # Map of values to insert/update in the `sliding_sync_membership_snapshots` table + sliding_sync_insert_map: SlidingSyncStateInsertValues = {} + + # Parse the raw event JSON + for state_key, event in state_map.items(): + if state_key == (EventTypes.Create, ""): + room_type = event.content.get(EventContentFields.ROOM_TYPE) + # Scrutinize JSON values + if room_type is None or ( + isinstance(room_type, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in room_type + ): + sliding_sync_insert_map["room_type"] = room_type + elif state_key == (EventTypes.RoomEncryption, ""): + encryption_algorithm = event.content.get( + EventContentFields.ENCRYPTION_ALGORITHM + ) + is_encrypted = encryption_algorithm is not None + sliding_sync_insert_map["is_encrypted"] = is_encrypted + elif state_key == (EventTypes.Name, ""): + room_name = event.content.get(EventContentFields.ROOM_NAME) + # Scrutinize JSON values. We ignore values with null bytes as + # Postgres doesn't allow them in text columns. + if room_name is None or ( + isinstance(room_name, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in room_name + ): + sliding_sync_insert_map["room_name"] = room_name + elif state_key == (EventTypes.Tombstone, ""): + successor_room_id = event.content.get( + EventContentFields.TOMBSTONE_SUCCESSOR_ROOM + ) + # Scrutinize JSON values + if successor_room_id is None or ( + isinstance(successor_room_id, str) + # We ignore values with null bytes as Postgres doesn't allow them in + # text columns. + and "\0" not in successor_room_id + ): + sliding_sync_insert_map["tombstone_successor_room_id"] = ( + successor_room_id + ) + else: + # We only expect to see events according to the + # `SLIDING_SYNC_RELEVANT_STATE_SET`. + raise AssertionError( + "Unexpected event (we should not be fetching extra events or this " + + "piece of code needs to be updated to handle a new event type added " + + f"to `SLIDING_SYNC_RELEVANT_STATE_SET`): {state_key} {event.event_id}" + ) + + return sliding_sync_insert_map + + @classmethod + def _get_sliding_sync_insert_values_from_stripped_state( + cls, unsigned_stripped_state_events: Any + ) -> SlidingSyncMembershipSnapshotSharedInsertValues: + """ + Pull out the relevant state values from the stripped state on an invite or knock + membership event needed to insert into the `sliding_sync_membership_snapshots` + table. + + Returns: + Map from column names (`room_type`, `is_encrypted`, `room_name`) to relevant + state values needed to insert into the `sliding_sync_membership_snapshots` table. 
+ """ + # Map of values to insert/update in the `sliding_sync_membership_snapshots` table + sliding_sync_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {} + + if unsigned_stripped_state_events is not None: + stripped_state_map: MutableStateMap[StrippedStateEvent] = {} + if isinstance(unsigned_stripped_state_events, list): + for raw_stripped_event in unsigned_stripped_state_events: + stripped_state_event = parse_stripped_state_event( + raw_stripped_event + ) + if stripped_state_event is not None: + stripped_state_map[ + ( + stripped_state_event.type, + stripped_state_event.state_key, + ) + ] = stripped_state_event + + # If there is some stripped state, we assume the remote server passed *all* + # of the potential stripped state events for the room. + create_stripped_event = stripped_state_map.get((EventTypes.Create, "")) + # Sanity check that we at-least have the create event + if create_stripped_event is not None: + sliding_sync_insert_map["has_known_state"] = True + + # XXX: Keep this up-to-date with `SLIDING_SYNC_RELEVANT_STATE_SET` + + # Find the room_type + sliding_sync_insert_map["room_type"] = ( + create_stripped_event.content.get(EventContentFields.ROOM_TYPE) + if create_stripped_event is not None + else None + ) + + # Find whether the room is_encrypted + encryption_stripped_event = stripped_state_map.get( + (EventTypes.RoomEncryption, "") + ) + encryption = ( + encryption_stripped_event.content.get( + EventContentFields.ENCRYPTION_ALGORITHM + ) + if encryption_stripped_event is not None + else None + ) + sliding_sync_insert_map["is_encrypted"] = encryption is not None + + # Find the room_name + room_name_stripped_event = stripped_state_map.get((EventTypes.Name, "")) + sliding_sync_insert_map["room_name"] = ( + room_name_stripped_event.content.get(EventContentFields.ROOM_NAME) + if room_name_stripped_event is not None + else None + ) + + # Check for null bytes in the room name and type. We have to + # ignore values with null bytes as Postgres doesn't allow them + # in text columns. + if ( + sliding_sync_insert_map["room_name"] is not None + and "\0" in sliding_sync_insert_map["room_name"] + ): + sliding_sync_insert_map.pop("room_name") + + if ( + sliding_sync_insert_map["room_type"] is not None + and "\0" in sliding_sync_insert_map["room_type"] + ): + sliding_sync_insert_map.pop("room_type") + + # Find the tombstone_successor_room_id + # Note: This isn't one of the stripped state events according to the spec + # but seems like there is no reason not to support this kind of thing. 
+ tombstone_stripped_event = stripped_state_map.get( + (EventTypes.Tombstone, "") + ) + sliding_sync_insert_map["tombstone_successor_room_id"] = ( + tombstone_stripped_event.content.get( + EventContentFields.TOMBSTONE_SUCCESSOR_ROOM + ) + if tombstone_stripped_event is not None + else None + ) + + if ( + sliding_sync_insert_map["tombstone_successor_room_id"] is not None + and "\0" in sliding_sync_insert_map["tombstone_successor_room_id"] + ): + sliding_sync_insert_map.pop("tombstone_successor_room_id") + + else: + # No stripped state provided + sliding_sync_insert_map["has_known_state"] = False + sliding_sync_insert_map["room_type"] = None + sliding_sync_insert_map["room_name"] = None + sliding_sync_insert_map["is_encrypted"] = False + else: + # No stripped state provided + sliding_sync_insert_map["has_known_state"] = False + sliding_sync_insert_map["room_type"] = None + sliding_sync_insert_map["room_name"] = None + sliding_sync_insert_map["is_encrypted"] = False + + return sliding_sync_insert_map + + def _update_sliding_sync_tables_with_new_persisted_events_txn( + self, + txn: LoggingTransaction, + room_id: str, + events_and_contexts: List[Tuple[EventBase, EventContext]], + ) -> None: + """ + Update the latest `event_stream_ordering`/`bump_stamp` columns in the + `sliding_sync_joined_rooms` table for the room with new events. + + This function assumes that `_store_event_txn()` (to persist the event) and + `_update_current_state_txn(...)` (so that `sliding_sync_joined_rooms` table has + been updated with rooms that were joined) have already been run. + + Args: + txn + room_id: The room that all of the events belong to + events_and_contexts: The events being persisted. We assume the list is + sorted ascending by `stream_ordering`. We don't care about the sort when the + events are backfilled (with negative `stream_ordering`). + """ + + # Nothing to do if there are no events + if len(events_and_contexts) == 0: + return + + # Since the list is sorted ascending by `stream_ordering`, the last event should + # have the highest `stream_ordering`. + max_stream_ordering = events_and_contexts[-1][ + 0 + ].internal_metadata.stream_ordering + # `stream_ordering` should be assigned for persisted events + assert max_stream_ordering is not None + # Check if the event is a backfilled event (with a negative `stream_ordering`). + # If one event is backfilled, we assume this whole batch was backfilled. + if max_stream_ordering < 0: + # We only update the sliding sync tables for non-backfilled events. + return + + max_bump_stamp = None + for event, _ in reversed(events_and_contexts): + # Sanity check that all events belong to the same room + assert event.room_id == room_id + + if event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: + # `stream_ordering` should be assigned for persisted events + assert event.internal_metadata.stream_ordering is not None + + max_bump_stamp = event.internal_metadata.stream_ordering + + # Since we're iterating in reverse, we can break as soon as we find a + # matching bump event which should have the highest `stream_ordering`. + break + + # Handle updating the `sliding_sync_joined_rooms` table. + # + txn.execute( + """ + UPDATE sliding_sync_joined_rooms + SET + event_stream_ordering = CASE + WHEN event_stream_ordering IS NULL OR event_stream_ordering < ? + THEN ? + ELSE event_stream_ordering + END, + bump_stamp = CASE + WHEN bump_stamp IS NULL OR bump_stamp < ? + THEN ? + ELSE bump_stamp + END + WHERE room_id = ? 
+ """, + ( + max_stream_ordering, + max_stream_ordering, + max_bump_stamp, + max_bump_stamp, + room_id, + ), + ) + # This may or may not update any rows depending if we are `no_longer_in_room` def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str) -> None: """Update the room version in the database based off current state @@ -1931,7 +2866,9 @@ class PersistEventsStore: ) for event in events: + # Sanity check that we're working with persisted events assert event.internal_metadata.stream_ordering is not None + assert event.internal_metadata.instance_name is not None # We update the local_current_membership table only if the event is # "current", i.e., its something that has just happened. @@ -1945,6 +2882,16 @@ class PersistEventsStore: and event.internal_metadata.is_outlier() and event.internal_metadata.is_out_of_band_membership() ): + # The only sort of out-of-band-membership events we expect to see here + # are remote invites/knocks and LEAVE events corresponding to + # rejected/retracted invites and rescinded knocks. + assert event.type == EventTypes.Member + assert event.membership in ( + Membership.INVITE, + Membership.KNOCK, + Membership.LEAVE, + ) + self.db_pool.simple_upsert_txn( txn, table="local_current_membership", @@ -1956,6 +2903,59 @@ class PersistEventsStore: }, ) + # Handle updating the `sliding_sync_membership_snapshots` table + # (out-of-band membership events only) + # + raw_stripped_state_events = None + if event.membership == Membership.INVITE: + invite_room_state = event.unsigned.get("invite_room_state") + raw_stripped_state_events = invite_room_state + elif event.membership == Membership.KNOCK: + knock_room_state = event.unsigned.get("knock_room_state") + raw_stripped_state_events = knock_room_state + + insert_values = { + "sender": event.sender, + "membership_event_id": event.event_id, + "membership": event.membership, + # Since this is a new membership, it isn't forgotten anymore (which + # matches how Synapse currently thinks about the forgotten status) + "forgotten": 0, + "event_stream_ordering": event.internal_metadata.stream_ordering, + "event_instance_name": event.internal_metadata.instance_name, + } + if event.membership == Membership.LEAVE: + # Inherit the meta data from the remote invite/knock. When using + # sliding sync filters, this will prevent the room from + # disappearing/appearing just because you left the room. + pass + elif event.membership in (Membership.INVITE, Membership.KNOCK): + extra_insert_values = ( + self._get_sliding_sync_insert_values_from_stripped_state( + raw_stripped_state_events + ) + ) + insert_values.update(extra_insert_values) + else: + # We don't know how to handle this type of membership yet + # + # FIXME: We should use `assert_never` here but for some reason + # the exhaustive matching doesn't recognize the `Never` here. 
+ # assert_never(event.membership) + raise AssertionError( + f"Unexpected out-of-band membership {event.membership} ({event.event_id}) that we don't know how to handle yet" + ) + + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_membership_snapshots", + keyvalues={ + "room_id": event.room_id, + "user_id": event.state_key, + }, + values=insert_values, + ) + def _handle_event_relations( self, txn: LoggingTransaction, event: EventBase ) -> None: @@ -2221,7 +3221,7 @@ class PersistEventsStore: if notifiable_events: txn.execute_batch( sql, - ( + [ ( event.room_id, event.internal_metadata.stream_ordering, @@ -2229,18 +3229,18 @@ class PersistEventsStore: event.event_id, ) for event in notifiable_events - ), + ], ) # Now we delete the staging area for *all* events that were being # persisted. txn.execute_batch( "DELETE FROM event_push_actions_staging WHERE event_id = ?", - ( + [ (event.event_id,) for event, _ in all_events_and_contexts if event.internal_metadata.is_notifiable() - ), + ], ) def _remove_push_actions_for_event_id_txn( @@ -2415,7 +3415,7 @@ class PersistEventsStore: ) potential_backwards_extremities.difference_update( - e for e, in existing_events_outliers + e for (e,) in existing_events_outliers ) if potential_backwards_extremities: @@ -2448,8 +3448,7 @@ class PersistEventsStore: # Delete all these events that we've already fetched and now know that their # prev events are the new backwards extremeties. query = ( - "DELETE FROM event_backward_extremities" - " WHERE event_id = ? AND room_id = ?" + "DELETE FROM event_backward_extremities WHERE event_id = ? AND room_id = ?" ) backward_extremity_tuples_to_remove = [ (ev.event_id, ev.room_id) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py
index 64d303e330..5c83a9f779 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py
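Several `txn.execute_batch(...)` call sites in the events.py hunks above switch their parameter argument from a generator expression to a list literal. The diff doesn't state the motivation, but one plausible reason is that a generator can only be consumed once, so any helper that iterates the batch a second time (for retries, logging, or sizing) silently sees nothing, while a list is safe to re-iterate. A small demonstration with plain sqlite3 (illustrative only, not Synapse's database layer):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (x INTEGER)")

rows_gen = ((i,) for i in range(3))
conn.executemany("INSERT INTO t VALUES (?)", rows_gen)
conn.executemany("INSERT INTO t VALUES (?)", rows_gen)  # drained: inserts nothing

rows_list = [(i,) for i in range(3)]
conn.executemany("INSERT INTO t VALUES (?)", rows_list)
conn.executemany("INSERT INT" "O t VALUES (?)".replace('" "', ""), rows_list)  # lists re-iterate fine

print(conn.execute("SELECT COUNT(*) FROM t").fetchone())  # (9,)
```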
@@ -24,9 +24,14 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast import attr -from synapse.api.constants import EventContentFields, RelationTypes +from synapse.api.constants import ( + MAX_DEPTH, + EventContentFields, + Membership, + RelationTypes, +) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -34,9 +39,27 @@ from synapse.storage.database import ( LoggingTransaction, make_tuple_comparison_clause, ) -from synapse.storage.databases.main.events import PersistEventsStore +from synapse.storage.databases.main.events import ( + SLIDING_SYNC_RELEVANT_STATE_SET, + PersistEventsStore, + SlidingSyncMembershipInfoWithEventPos, + SlidingSyncMembershipSnapshotSharedInsertValues, + SlidingSyncStateInsertValues, +) +from synapse.storage.databases.main.events_worker import ( + DatabaseCorruptionError, + InvalidEventError, +) +from synapse.storage.databases.main.state_deltas import StateDeltasStore +from synapse.storage.databases.main.stream import StreamWorkerStore +from synapse.storage.engines import PostgresEngine from synapse.storage.types import Cursor -from synapse.types import JsonDict, StrCollection +from synapse.types import JsonDict, RoomStreamToken, StateMap, StrCollection +from synapse.types.handlers import SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES +from synapse.types.state import StateFilter +from synapse.types.storage import _BackgroundUpdates +from synapse.util import json_encoder +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -59,26 +82,6 @@ _REPLACE_STREAM_ORDERING_SQL_COMMANDS = ( ) -class _BackgroundUpdates: - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" - POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2" - INDEX_STREAM_ORDERING2 = "index_stream_ordering2" - INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url" - INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order" - INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream" - INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts" - REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column" - - EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" - EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" - - EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" - - EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" - - @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: """Return value for _calculate_chain_cover_txn.""" @@ -97,7 +100,19 @@ class _CalculateChainCover: finished_room_map: Dict[str, Tuple[int, int]] -class EventsBackgroundUpdatesStore(SQLBaseStore): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _JoinedRoomStreamOrderingUpdate: + """ + Intermediate container class used in `SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE` + """ + + # The most recent event stream_ordering for the room + most_recent_event_stream_ordering: int + # The most recent event `bump_stamp` for the room + most_recent_bump_stamp: Optional[int] + + +class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseStore): def 
__init__( self, database: DatabasePool, @@ -279,6 +294,44 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): where_clause="NOT outlier", ) + # Handle background updates for Sliding Sync tables + # + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + self._sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update, + ) + # Add some background updates to populate the sliding sync tables + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, + self._sliding_sync_joined_rooms_bg_update, + ) + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + self._sliding_sync_membership_snapshots_bg_update, + ) + # Add a background update to fix data integrity issue in the + # `sliding_sync_membership_snapshots` -> `forgotten` column + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE, + self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update, + ) + + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, self.fixup_max_depth_cap_bg_update + ) + + # We want this to run on the main database at startup before we start processing + # events. + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + with db_conn.cursor(txn_name="resolve_sliding_sync") as txn: + _resolve_stale_data_in_sliding_sync_tables( + txn=txn, + ) + async def _background_reindex_fields_sender( self, progress: JsonDict, batch_size: int ) -> int: @@ -586,7 +639,8 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): room_ids = {row[0] for row in rows} for room_id in room_ids: txn.call_after( - self.get_latest_event_ids_in_room.invalidate, (room_id,) # type: ignore[attr-defined] + self.get_latest_event_ids_in_room.invalidate, # type: ignore[attr-defined] + (room_id,), ) self.db_pool.simple_delete_many_txn( @@ -1073,7 +1127,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): PersistEventsStore._add_chain_cover_index( txn, self.db_pool, - self.event_chain_id_gen, # type: ignore[attr-defined] + self.event_chain_id_gen, event_to_room_id, event_to_types, cast(Dict[str, StrCollection], event_to_auth_chain), @@ -1516,3 +1570,1320 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): ) return batch_size + + async def _sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update( + self, progress: JsonDict, _batch_size: int + ) -> int: + """ + Prefill `sliding_sync_joined_rooms_to_recalculate` table with all rooms we know about already. + """ + + def _txn(txn: LoggingTransaction) -> None: + # We do this as one big bulk insert. This has been tested on a bigger + # homeserver with ~10M rooms and took 60s. There is potential for this to + # starve disk usage while this goes on. + # + # We upsert in case we have to run this multiple times. 
+ txn.execute( + """ + INSERT INTO sliding_sync_joined_rooms_to_recalculate + (room_id) + SELECT DISTINCT room_id FROM local_current_membership + WHERE membership = 'join' + ON CONFLICT (room_id) + DO NOTHING; + """, + ) + + await self.db_pool.runInteraction( + "_sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update", + _txn, + ) + + # Background update is done. + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE + ) + return 0 + + async def _sliding_sync_joined_rooms_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the `sliding_sync_joined_rooms` table. + """ + # We don't need to fetch any progress state because we just grab the next N + # rooms in `sliding_sync_joined_rooms_to_recalculate` + + def _get_rooms_to_update_txn(txn: LoggingTransaction) -> List[Tuple[str]]: + """ + Returns: + A list of room IDs to update. + """ + # Fetch the set of room IDs that we want to update + # + # We use the `current_state_events` table as the barometer for whether the + # server is still participating in the room because if we're + # `no_longer_in_room`, this table would be cleared out for the given + # `room_id`. + txn.execute( + """ + SELECT room_id + FROM sliding_sync_joined_rooms_to_recalculate + LIMIT ? + """, + (batch_size,), + ) + + rooms_to_update_rows = cast(List[Tuple[str]], txn.fetchall()) + + return rooms_to_update_rows + + rooms_to_update = await self.db_pool.runInteraction( + "_sliding_sync_joined_rooms_bg_update._get_rooms_to_update_txn", + _get_rooms_to_update_txn, + ) + + if not rooms_to_update: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE + ) + return 0 + + # Map from room_id to insert/update state values in the `sliding_sync_joined_rooms` table. + joined_room_updates: Dict[str, SlidingSyncStateInsertValues] = {} + # Map from room_id to stream_ordering/bump_stamp, etc values + joined_room_stream_ordering_updates: Dict[ + str, _JoinedRoomStreamOrderingUpdate + ] = {} + # As long as we get this value before we fetch the current state, we can use it + # to check if something has changed since that point. + most_recent_current_state_delta_stream_id = ( + await self.get_max_stream_id_in_current_state_deltas() + ) + for (room_id,) in rooms_to_update: + current_state_ids_map = await self.db_pool.runInteraction( + "_sliding_sync_joined_rooms_bg_update._get_relevant_sliding_sync_current_state_event_ids_txn", + PersistEventsStore._get_relevant_sliding_sync_current_state_event_ids_txn, + room_id, + ) + + # If we're not joined to the room a) it doesn't belong in the + # `sliding_sync_joined_rooms` table so we should skip and b) we won't have + # any `current_state_events` for the room. + if not current_state_ids_map: + continue + + try: + fetched_events = await self.get_events(current_state_ids_map.values()) + except (DatabaseCorruptionError, InvalidEventError) as e: + logger.warning( + "Failed to fetch state for room '%s' due to corrupted events. Ignoring. 
Error: %s", + room_id, + e, + ) + continue + + current_state_map: StateMap[EventBase] = { + state_key: fetched_events[event_id] + for state_key, event_id in current_state_ids_map.items() + # `get_events(...)` will filter out events for unknown room versions + if event_id in fetched_events + } + + # Even if we are joined to the room, this can happen for unknown room + # versions (old room versions that aren't known anymore) since + # `get_events(...)` will filter out events for unknown room versions + if not current_state_map: + continue + + state_insert_values = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + current_state_map + ) + ) + # We should have some insert values for each room, even if they are `None` + assert state_insert_values + joined_room_updates[room_id] = state_insert_values + + # Figure out the stream_ordering of the latest event in the room + most_recent_event_pos_results = await self.get_last_event_pos_in_room( + room_id, event_types=None + ) + assert most_recent_event_pos_results is not None, ( + f"We should not be seeing `None` here because the room ({room_id}) should at-least have a create event " + + "given we pulled the room out of `current_state_events`" + ) + most_recent_event_stream_ordering = most_recent_event_pos_results[1].stream + + # The `most_recent_event_stream_ordering` should be positive, + # however there are (very rare) rooms where that is not the case in + # the matrix.org database. It's not clear how they got into that + # state, but does mean that we cannot assert that the stream + # ordering is indeed positive. + + # Figure out the latest `bump_stamp` in the room. This could be `None` for a + # federated room you just joined where all of events are still `outliers` or + # backfilled history. In the Sliding Sync API, we default to the user's + # membership event `stream_ordering` if we don't have a `bump_stamp` so + # having it as `None` in this table is fine. + bump_stamp_event_pos_results = await self.get_last_event_pos_in_room( + room_id, event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES + ) + most_recent_bump_stamp = None + if ( + bump_stamp_event_pos_results is not None + and bump_stamp_event_pos_results[1].stream > 0 + ): + most_recent_bump_stamp = bump_stamp_event_pos_results[1].stream + + joined_room_stream_ordering_updates[room_id] = ( + _JoinedRoomStreamOrderingUpdate( + most_recent_event_stream_ordering=most_recent_event_stream_ordering, + most_recent_bump_stamp=most_recent_bump_stamp, + ) + ) + + def _fill_table_txn(txn: LoggingTransaction) -> None: + # Handle updating the `sliding_sync_joined_rooms` table + # + for ( + room_id, + update_map, + ) in joined_room_updates.items(): + joined_room_stream_ordering_update = ( + joined_room_stream_ordering_updates[room_id] + ) + event_stream_ordering = ( + joined_room_stream_ordering_update.most_recent_event_stream_ordering + ) + bump_stamp = joined_room_stream_ordering_update.most_recent_bump_stamp + + # Check if the current state has been updated since we gathered it. + # We're being careful not to insert/overwrite with stale data. + state_deltas_since_we_gathered_current_state = ( + self.get_current_state_deltas_for_room_txn( + txn, + room_id, + from_token=RoomStreamToken( + stream=most_recent_current_state_delta_stream_id + ), + to_token=None, + ) + ) + for state_delta in state_deltas_since_we_gathered_current_state: + # We only need to check for the state is relevant to the + # `sliding_sync_joined_rooms` table. 
+ if ( + state_delta.event_type, + state_delta.state_key, + ) in SLIDING_SYNC_RELEVANT_STATE_SET: + # Raising exception so we can just exit and try again. It would + # be hard to resolve this within the transaction because we need + # to get full events out that take redactions into account. We + # could add some retry logic here, but it's easier to just let + # the background update try again. + raise Exception( + "Current state was updated after we gathered it to update " + + "`sliding_sync_joined_rooms` in the background update. " + + "Raising exception so we can just try again." + ) + + # Since we fully insert rows into `sliding_sync_joined_rooms`, we can + # just do everything on insert and `ON CONFLICT DO NOTHING`. + # + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + values={}, + insertion_values={ + **update_map, + # The reason we're only *inserting* (not *updating*) `event_stream_ordering` + # and `bump_stamp` is because if they are present, that means they are already + # up-to-date. + "event_stream_ordering": event_stream_ordering, + "bump_stamp": bump_stamp, + }, + ) + + # Now that we've processed all the rooms, we can remove them from the + # queue. + # + # Note: we need to remove all the rooms from the queue we pulled out + # from the DB, not just the ones we've processed above. Otherwise + # we'll simply keep pulling out the same rooms over and over again. + self.db_pool.simple_delete_many_batch_txn( + txn, + table="sliding_sync_joined_rooms_to_recalculate", + keys=("room_id",), + values=rooms_to_update, + ) + + await self.db_pool.runInteraction( + "sliding_sync_joined_rooms_bg_update", _fill_table_txn + ) + + return len(rooms_to_update) + + async def _sliding_sync_membership_snapshots_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to populate the `sliding_sync_membership_snapshots` table. + """ + # We do this in two phases: a) the initial phase where we go through all + # room memberships, and then b) a second phase where we look at new + # memberships (this is to handle the case where we downgrade and then + # upgrade again). + # + # We have to do this as two phases (rather than just the second phase + # where we iterate on event_stream_ordering), as the + # `event_stream_ordering` column may have null values for old rows. + # Therefore we first do the set of historic rooms and *then* look at any + # new rows (which will have a non-null `event_stream_ordering`). + initial_phase = progress.get("initial_phase") + if initial_phase is None: + # If this is the first run, store the current max stream position. + # We know we will go through all memberships less than the current + # max in the initial phase. + progress = { + "initial_phase": True, + "last_event_stream_ordering": self.get_room_max_stream_ordering(), + } + await self.db_pool.updates._background_update_progress( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + progress, + ) + initial_phase = True + + last_room_id = progress.get("last_room_id", "") + last_user_id = progress.get("last_user_id", "") + last_event_stream_ordering = progress["last_event_stream_ordering"] + + def _find_memberships_to_update_txn( + txn: LoggingTransaction, + ) -> List[ + Tuple[ + str, + Optional[str], + Optional[str], + str, + str, + str, + str, + int, + Optional[str], + bool, + ] + ]: + # Fetch the set of event IDs that we want to update + # + # We skip over rows which we've already handled, i.e. 
have a + # matching row in `sliding_sync_membership_snapshots` with the same + # room, user and event ID. + # + # We also ignore rooms that the user has left themselves (i.e. not + # kicked). This is to avoid having to port lots of old rooms that we + # will never send down sliding sync (as we exclude such rooms from + # initial syncs). + + if initial_phase: + # There are some old out-of-band memberships (before + # https://github.com/matrix-org/synapse/issues/6983) where we don't have + # the corresponding room stored in the `rooms` table. We use `LEFT JOIN + # rooms AS r USING (room_id)` to find the rooms missing from `rooms` and + # insert a row for them below. + txn.execute( + """ + SELECT + c.room_id, + r.room_id, + r.room_version, + c.user_id, + e.sender, + c.event_id, + c.membership, + e.stream_ordering, + e.instance_name, + e.outlier + FROM local_current_membership AS c + LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id) + INNER JOIN events AS e USING (event_id) + LEFT JOIN rooms AS r ON (c.room_id = r.room_id) + WHERE (c.room_id, c.user_id) > (?, ?) + AND (m.user_id IS NULL OR c.event_id != m.membership_event_id) + ORDER BY c.room_id ASC, c.user_id ASC + LIMIT ? + """, + (last_room_id, last_user_id, batch_size), + ) + elif last_event_stream_ordering is not None: + # It's important to sort by `event_stream_ordering` *ascending* (oldest to + # newest) so that if we see that this background update is in progress and want + # to start the catch-up process, we can safely assume that it will + # eventually get to the rooms we want to catch up on anyway (see + # `_resolve_stale_data_in_sliding_sync_tables()`). + # + # `c.room_id` is duplicated to make it match what we're doing in the + # `initial_phase`. But we can avoid doing the extra `rooms` table join + # because we can assume all of these new events won't have this problem. + txn.execute( + """ + SELECT + c.room_id, + r.room_id, + r.room_version, + c.user_id, + e.sender, + c.event_id, + c.membership, + c.event_stream_ordering, + e.instance_name, + e.outlier + FROM local_current_membership AS c + LEFT JOIN sliding_sync_membership_snapshots AS m USING (room_id, user_id) + INNER JOIN events AS e USING (event_id) + LEFT JOIN rooms AS r ON (c.room_id = r.room_id) + WHERE c.event_stream_ordering > ? + AND (m.user_id IS NULL OR c.event_id != m.membership_event_id) + ORDER BY c.event_stream_ordering ASC + LIMIT ? + """, + (last_event_stream_ordering, batch_size), + ) + else: + raise Exception("last_event_stream_ordering should not be None") + + memberships_to_update_rows = cast( + List[ + Tuple[ + str, + Optional[str], + Optional[str], + str, + str, + str, + str, + int, + Optional[str], + bool, + ] + ], + txn.fetchall(), + ) + + return memberships_to_update_rows + + memberships_to_update_rows = await self.db_pool.runInteraction( + "sliding_sync_membership_snapshots_bg_update._find_memberships_to_update_txn", + _find_memberships_to_update_txn, + ) + + if not memberships_to_update_rows: + if initial_phase: + # Move on to the next phase. + await self.db_pool.updates._background_update_progress( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + { + "initial_phase": False, + "last_event_stream_ordering": last_event_stream_ordering, + }, + ) + return 0 + else: + # We've finished both phases, we're done. 
+ await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE + ) + return 0 + + def _find_previous_invite_or_knock_membership_txn( + txn: LoggingTransaction, room_id: str, user_id: str, event_id: str + ) -> Optional[Tuple[str, str]]: + # Find the previous invite/knock event before the leave event + # + # Here are some notes on how we landed on this query: + # + # We're using `topological_ordering` instead of `stream_ordering` because + # somehow it's possible to have your `leave` event backfilled with a + # negative `stream_ordering` and your previous `invite` event with a + # positive `stream_ordering` so we wouldn't have a chance of finding the + # previous membership with a naive `event_stream_ordering < ?` comparison. + # + # Also be careful because `room_memberships.event_stream_ordering` is + # nullable and not always filled in. You would need to join on `events` to + # rely on `events.stream_ordering` instead. Even though the + # `events.stream_ordering` also doesn't have a `NOT NULL` constraint, it + # doesn't have any rows where this is the case (checked on `matrix.org`). + # The fact that the `events.stream_ordering` is a nullable column is a holdover + # from a rename of the column. + # + # You might also consider using the `event_auth` table to find the previous + # membership, but there are cases where somehow a membership event doesn't + # point back to the previous membership event in the auth events (unknown + # cause). + txn.execute( + """ + SELECT event_id, membership + FROM room_memberships AS m + INNER JOIN events AS e USING (room_id, event_id) + WHERE + room_id = ? + AND m.user_id = ? + AND (m.membership = ? OR m.membership = ?) + AND e.event_id != ? + ORDER BY e.topological_ordering DESC + LIMIT 1 + """, + ( + room_id, + user_id, + # We look explicitly for `invite` and `knock` events instead of + # just their previous membership as someone could have been `invite` + # -> `ban` -> unbanned (`leave`) and we want to find the `invite` + # event where the stripped state is. + Membership.INVITE, + Membership.KNOCK, + event_id, + ), + ) + row = txn.fetchone() + + if row is None: + # Generally we should have an invite or knock event for leaves + # that are outliers; however, this may not always be the case + # (e.g. a local user got kicked but the kick event got pulled in + # as an outlier). + return None + + event_id, membership = row + + return event_id, membership + + # Map from (room_id, user_id) to ... + to_insert_membership_snapshots: Dict[ + Tuple[str, str], SlidingSyncMembershipSnapshotSharedInsertValues + ] = {} + to_insert_membership_infos: Dict[ + Tuple[str, str], SlidingSyncMembershipInfoWithEventPos + ] = {} + for ( + room_id, + room_id_from_rooms_table, + room_version_id, + user_id, + sender, + membership_event_id, + membership, + membership_event_stream_ordering, + membership_event_instance_name, + is_outlier, + ) in memberships_to_update_rows: + # We don't know how to handle `membership` values other than these. The + # code below would need to be updated. + assert membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.KNOCK, + Membership.LEAVE, + Membership.BAN, + ) + + if ( + room_version_id is not None + and room_version_id not in KNOWN_ROOM_VERSIONS + ): + # Ignore rooms with unknown room versions (these were + # experimental rooms that we no longer support). 
+ continue
+
+ # There are some old out-of-band memberships (before
+ # https://github.com/matrix-org/synapse/issues/6983) where we don't have the
+ # corresponding room stored in the `rooms` table. We have a `FOREIGN KEY`
+ # constraint on the `sliding_sync_membership_snapshots` table so we have to
+ # fix-up these memberships by adding the room to the `rooms` table.
+ if room_id_from_rooms_table is None:
+ await self.db_pool.simple_insert(
+ table="rooms",
+ values={
+ "room_id": room_id,
+ # Only out-of-band memberships are missing from the `rooms`
+ # table so that is the only type of membership we're dealing
+ # with here. Since we don't calculate the "chain cover" for
+ # out-of-band memberships, we can just set this to `True`:
+ # if the user ever joins the room, we will end up calculating
+ # the "chain cover" anyway.
+ "has_auth_chain_index": True,
+ },
+ )
+
+ # Map of values to insert/update in the `sliding_sync_membership_snapshots` table
+ sliding_sync_membership_snapshots_insert_map: SlidingSyncMembershipSnapshotSharedInsertValues = {}
+ if membership == Membership.JOIN:
+ # If we're still joined, we can pull from current state.
+ current_state_ids_map: StateMap[
+ str
+ ] = await self.hs.get_storage_controllers().state.get_current_state_ids(
+ room_id,
+ state_filter=StateFilter.from_types(
+ SLIDING_SYNC_RELEVANT_STATE_SET
+ ),
+ # Partially-stated rooms should have all state events except for
+ # remote membership events so we don't need to wait at all because
+ # we only want some non-membership state
+ await_full_state=False,
+ )
+ # We're iterating over rooms that we are joined to so they should
+ # have `current_state_events` and we should have some current state
+ # for each room
+ if current_state_ids_map:
+ try:
+ fetched_events = await self.get_events(
+ current_state_ids_map.values()
+ )
+ except (DatabaseCorruptionError, InvalidEventError) as e:
+ logger.warning(
+ "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s",
+ room_id,
+ e,
+ )
+ continue
+
+ current_state_map: StateMap[EventBase] = {
+ state_key: fetched_events[event_id]
+ for state_key, event_id in current_state_ids_map.items()
+ # `get_events(...)` will filter out events for unknown room versions
+ if event_id in fetched_events
+ }
+
+ # Can happen for unknown room versions (old room versions that aren't known
+ # anymore) since `get_events(...)` will filter out events for unknown room
+ # versions
+ if not current_state_map:
+ continue
+
+ state_insert_values = PersistEventsStore._get_sliding_sync_insert_values_from_state_map(
+ current_state_map
+ )
+ sliding_sync_membership_snapshots_insert_map.update(
+ state_insert_values
+ )
+ # We should have some insert values for each room, even if they are `None`
+ assert sliding_sync_membership_snapshots_insert_map
+
+ # We have current state to work from
+ sliding_sync_membership_snapshots_insert_map["has_known_state"] = (
+ True
+ )
+ else:
+ # Although we expect every room to have a create event (even
+ # past unknown room versions since we haven't supported one
+ # without it), there seem to be some corrupted rooms in
+ # practice that don't have the create event in the
+ # `current_state_events` table. The create event does exist
+ # in the events table though. We'll just say that we don't
+ # know the state for these rooms and continue on with our
+ # day.
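+ # The fallback below mirrors the "no stripped state" case used for
+ # invite/knock memberships further down: every state column is set
+ # explicitly rather than left NULL, so the row itself records that
+ # the state is unknown.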
+ sliding_sync_membership_snapshots_insert_map = {
+ "has_known_state": False,
+ "room_type": None,
+ "room_name": None,
+ "is_encrypted": False,
+ }
+ elif membership in (Membership.INVITE, Membership.KNOCK) or (
+ membership in (Membership.LEAVE, Membership.BAN) and is_outlier
+ ):
+ invite_or_knock_event_id = None
+ invite_or_knock_membership = None
+
+ # If the event is an `out_of_band_membership` (special case of
+ # `outlier`), we never had historical state so we have to pull from
+ # the stripped state on the previous invite/knock event. This gives
+ # us a consistent view of the room state regardless of your
+ # membership (i.e. the room shouldn't disappear if you're using the
+ # `is_encrypted` filter and you leave).
+ if membership in (Membership.LEAVE, Membership.BAN) and is_outlier:
+ previous_membership = await self.db_pool.runInteraction(
+ "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn",
+ _find_previous_invite_or_knock_membership_txn,
+ room_id,
+ user_id,
+ membership_event_id,
+ )
+ if previous_membership is not None:
+ (
+ invite_or_knock_event_id,
+ invite_or_knock_membership,
+ ) = previous_membership
+ else:
+ invite_or_knock_event_id = membership_event_id
+ invite_or_knock_membership = membership
+
+ if (
+ invite_or_knock_event_id is not None
+ and invite_or_knock_membership is not None
+ ):
+ # Pull from the stripped state on the invite/knock event
+ invite_or_knock_event = await self.get_event(
+ invite_or_knock_event_id
+ )
+
+ raw_stripped_state_events = None
+ if invite_or_knock_membership == Membership.INVITE:
+ invite_room_state = invite_or_knock_event.unsigned.get(
+ "invite_room_state"
+ )
+ raw_stripped_state_events = invite_room_state
+ elif invite_or_knock_membership == Membership.KNOCK:
+ knock_room_state = invite_or_knock_event.unsigned.get(
+ "knock_room_state"
+ )
+ raw_stripped_state_events = knock_room_state
+
+ sliding_sync_membership_snapshots_insert_map = PersistEventsStore._get_sliding_sync_insert_values_from_stripped_state(
+ raw_stripped_state_events
+ )
+ else:
+ # We couldn't find any state for the membership, so we just have to
+ # leave it as empty.
+ sliding_sync_membership_snapshots_insert_map = {
+ "has_known_state": False,
+ "room_type": None,
+ "room_name": None,
+ "is_encrypted": False,
+ }
+
+ # We should have some insert values for each room, even if no
+ # stripped state is on the event because we still want to record
+ # that we have no known state
+ assert sliding_sync_membership_snapshots_insert_map
+ elif membership in (Membership.LEAVE, Membership.BAN):
+ # Pull from historical state
+ state_ids_map = await self.hs.get_storage_controllers().state.get_state_ids_for_event(
+ membership_event_id,
+ state_filter=StateFilter.from_types(
+ SLIDING_SYNC_RELEVANT_STATE_SET
+ ),
+ # Partially-stated rooms should have all state events except for
+ # remote membership events so we don't need to wait at all because
+ # we only want some non-membership state
+ await_full_state=False,
+ )
+
+ try:
+ fetched_events = await self.get_events(state_ids_map.values())
+ except (DatabaseCorruptionError, InvalidEventError) as e:
+ logger.warning(
+ "Failed to fetch state for room '%s' due to corrupted events. Ignoring.
Error: %s", + room_id, + e, + ) + continue + + state_map: StateMap[EventBase] = { + state_key: fetched_events[event_id] + for state_key, event_id in state_ids_map.items() + # `get_events(...)` will filter out events for unknown room versions + if event_id in fetched_events + } + + # Can happen for unknown room versions (old room versions that aren't known + # anymore) since `get_events(...)` will filter out events for unknown room + # versions + if not state_map: + continue + + state_insert_values = ( + PersistEventsStore._get_sliding_sync_insert_values_from_state_map( + state_map + ) + ) + sliding_sync_membership_snapshots_insert_map.update(state_insert_values) + # We should have some insert values for each room, even if they are `None` + assert sliding_sync_membership_snapshots_insert_map + + # We have historical state to work from + sliding_sync_membership_snapshots_insert_map["has_known_state"] = True + else: + # We don't know how to handle this type of membership yet + # + # FIXME: We should use `assert_never` here but for some reason + # the exhaustive matching doesn't recognize the `Never` here. + # assert_never(membership) + raise AssertionError( + f"Unexpected membership {membership} ({membership_event_id}) that we don't know how to handle yet" + ) + + to_insert_membership_snapshots[(room_id, user_id)] = ( + sliding_sync_membership_snapshots_insert_map + ) + to_insert_membership_infos[(room_id, user_id)] = ( + SlidingSyncMembershipInfoWithEventPos( + user_id=user_id, + sender=sender, + membership_event_id=membership_event_id, + membership=membership, + membership_event_stream_ordering=membership_event_stream_ordering, + # If instance_name is null we default to "master" + membership_event_instance_name=membership_event_instance_name + or "master", + ) + ) + + def _fill_table_txn(txn: LoggingTransaction) -> None: + # Handle updating the `sliding_sync_membership_snapshots` table + # + for key, insert_map in to_insert_membership_snapshots.items(): + room_id, user_id = key + membership_info = to_insert_membership_infos[key] + sender = membership_info.sender + membership_event_id = membership_info.membership_event_id + membership = membership_info.membership + membership_event_stream_ordering = ( + membership_info.membership_event_stream_ordering + ) + membership_event_instance_name = ( + membership_info.membership_event_instance_name + ) + + # We don't need to upsert the state because we never partially + # insert/update the snapshots and anything already there is up-to-date + # EXCEPT for the `forgotten` field since that is updated out-of-band + # from the membership changes. + # + # Even though we're only doing insertions, we're using + # `simple_upsert_txn()` here to avoid unique violation errors that would + # happen from `simple_insert_txn()` + self.db_pool.simple_upsert_txn( + txn, + table="sliding_sync_membership_snapshots", + keyvalues={"room_id": room_id, "user_id": user_id}, + values={}, + insertion_values={ + **insert_map, + "sender": sender, + "membership_event_id": membership_event_id, + "membership": membership, + "event_stream_ordering": membership_event_stream_ordering, + "event_instance_name": membership_event_instance_name, + }, + ) + # We need to find the `forgotten` value during the transaction because + # we can't risk inserting stale data. 
+ if isinstance(txn.database_engine, PostgresEngine):
+ txn.execute(
+ """
+ UPDATE sliding_sync_membership_snapshots
+ SET
+ forgotten = m.forgotten
+ FROM room_memberships AS m
+ WHERE sliding_sync_membership_snapshots.room_id = ?
+ AND sliding_sync_membership_snapshots.user_id = ?
+ AND membership_event_id = ?
+ AND membership_event_id = m.event_id
+ AND m.event_id IS NOT NULL
+ """,
+ (
+ room_id,
+ user_id,
+ membership_event_id,
+ ),
+ )
+ else:
+ # SQLite doesn't support UPDATE FROM before 3.33.0, so we do
+ # this via sub-selects.
+ txn.execute(
+ """
+ UPDATE sliding_sync_membership_snapshots
+ SET
+ forgotten = (SELECT forgotten FROM room_memberships WHERE event_id = ?)
+ WHERE room_id = ? AND user_id = ? AND membership_event_id = ?
+ """,
+ (
+ membership_event_id,
+ room_id,
+ user_id,
+ membership_event_id,
+ ),
+ )
+
+ await self.db_pool.runInteraction(
+ "sliding_sync_membership_snapshots_bg_update", _fill_table_txn
+ )
+
+ # Update the progress
+ (
+ room_id,
+ _room_id_from_rooms_table,
+ _room_version_id,
+ user_id,
+ _sender,
+ _membership_event_id,
+ _membership,
+ membership_event_stream_ordering,
+ _membership_event_instance_name,
+ _is_outlier,
+ ) = memberships_to_update_rows[-1]
+
+ progress = {
+ "initial_phase": initial_phase,
+ "last_room_id": room_id,
+ "last_user_id": user_id,
+ "last_event_stream_ordering": last_event_stream_ordering,
+ }
+ if not initial_phase:
+ progress["last_event_stream_ordering"] = membership_event_stream_ordering
+
+ await self.db_pool.updates._background_update_progress(
+ _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+ progress,
+ )
+
+ return len(memberships_to_update_rows)
+
+ async def _sliding_sync_membership_snapshots_fix_forgotten_column_bg_update(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """
+ Background update to bring the `sliding_sync_membership_snapshots` ->
+ `forgotten` column back in sync with the `room_memberships` table.
+
+ Because of previously flawed code (now fixed), the column has fallen out of
+ sync for any room that someone has forgotten and subsequently re-joined or
+ had any new membership in, so we need to go and update the column to match
+ the `room_memberships` table.
+ """
+ last_event_stream_ordering = progress.get(
+ "last_event_stream_ordering", -(1 << 31)
+ )
+
+ def _txn(
+ txn: LoggingTransaction,
+ ) -> int:
+ """
+ Returns:
+ The number of rows updated.
+ """
+
+ # To simplify things, we can just recheck any row in
+ # `sliding_sync_membership_snapshots` with `forgotten=1`
+ txn.execute(
+ """
+ SELECT
+ s.room_id,
+ s.user_id,
+ s.membership_event_id,
+ s.event_stream_ordering,
+ m.forgotten
+ FROM sliding_sync_membership_snapshots AS s
+ INNER JOIN room_memberships AS m ON (s.membership_event_id = m.event_id)
+ WHERE s.event_stream_ordering > ?
+ AND s.forgotten = 1
+ ORDER BY s.event_stream_ordering ASC
+ LIMIT ?
+ """, + (last_event_stream_ordering, batch_size), + ) + + memberships_to_update_rows = cast( + List[Tuple[str, str, str, int, int]], + txn.fetchall(), + ) + if not memberships_to_update_rows: + return 0 + + # Assemble the values to update + # + # (room_id, user_id) + key_values: List[Tuple[str, str]] = [] + # (forgotten,) + value_values: List[Tuple[int]] = [] + for ( + room_id, + user_id, + _membership_event_id, + _event_stream_ordering, + forgotten, + ) in memberships_to_update_rows: + key_values.append( + ( + room_id, + user_id, + ) + ) + value_values.append((forgotten,)) + + # Update all of the rows in one go + self.db_pool.simple_update_many_txn( + txn, + table="sliding_sync_membership_snapshots", + key_names=("room_id", "user_id"), + key_values=key_values, + value_names=("forgotten",), + value_values=value_values, + ) + + # Update the progress + ( + _room_id, + _user_id, + _membership_event_id, + event_stream_ordering, + _forgotten, + ) = memberships_to_update_rows[-1] + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE, + { + "last_event_stream_ordering": event_stream_ordering, + }, + ) + + return len(memberships_to_update_rows) + + num_rows = await self.db_pool.runInteraction( + "_sliding_sync_membership_snapshots_fix_forgotten_column_bg_update", + _txn, + ) + + if not num_rows: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE + ) + + return num_rows + + async def fixup_max_depth_cap_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """Fixes the topological ordering for events that have a depth greater + than MAX_DEPTH. This should fix /messages ordering oddities.""" + + room_id_bound = progress.get("room_id", "") + + def redo_max_depth_bg_update_txn(txn: LoggingTransaction) -> Tuple[bool, int]: + txn.execute( + """ + SELECT room_id, room_version FROM rooms + WHERE room_id > ? + ORDER BY room_id + LIMIT ? + """, + (room_id_bound, batch_size), + ) + + # Find the next room ID to process, with a relevant room version. + room_ids: List[str] = [] + max_room_id: Optional[str] = None + for room_id, room_version_str in txn: + max_room_id = room_id + + # We only want to process rooms with a known room version that + # has strict canonical json validation enabled. + room_version = KNOWN_ROOM_VERSIONS.get(room_version_str) + if room_version and room_version.strict_canonicaljson: + room_ids.append(room_id) + + if max_room_id is None: + # The query did not return any rooms, so we are done. + return True, 0 + + # Update the progress to the last room ID we pulled from the DB, + # this ensures we always make progress. + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP, + progress={"room_id": max_room_id}, + ) + + if not room_ids: + # There were no rooms in this batch that required the fix. + return False, 0 + + clause, list_args = make_in_list_sql_clause( + self.database_engine, "room_id", room_ids + ) + sql = f""" + UPDATE events SET topological_ordering = ? + WHERE topological_ordering > ? 
AND {clause}
+ """
+ args = [MAX_DEPTH, MAX_DEPTH]
+ args.extend(list_args)
+ txn.execute(sql, args)
+
+ return False, len(room_ids)
+
+ done, num_rooms = await self.db_pool.runInteraction(
+ "redo_max_depth_bg_update", redo_max_depth_bg_update_txn
+ )
+
+ if done:
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.FIXUP_MAX_DEPTH_CAP
+ )
+
+ return num_rooms
+
+
+def _resolve_stale_data_in_sliding_sync_tables(
+ txn: LoggingTransaction,
+) -> None:
+ """
+ Clears stale/out-of-date entries from the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+
+ This accounts for when someone downgrades their Synapse version and then upgrades it
+ again. This will ensure that we don't have any stale/out-of-date data in the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables since any new
+ events sent in rooms would have also needed to be written to the sliding sync
+ tables. For example, a new event needs to bump `event_stream_ordering` in the
+ `sliding_sync_joined_rooms` table, as does some state in the room changing (like the
+ room name). Similarly, someone's membership changing in a room affects
+ `sliding_sync_membership_snapshots`.
+
+ This way, if a row exists in the sliding sync tables, we are able to rely on it
+ (accurate data). And if a row doesn't exist, we use a fallback to get the same info
+ until the background updates fill in the rows or a new event comes in triggering it
+ to be fully inserted.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table(txn)
+ _resolve_stale_data_in_sliding_sync_membership_snapshots_table(txn)
+
+
+def _resolve_stale_data_in_sliding_sync_joined_rooms_table(
+ txn: LoggingTransaction,
+) -> None:
+ """
+ Clears stale/out-of-date entries from the `sliding_sync_joined_rooms` table and
+ kicks off the background update to catch up with what we missed while Synapse was
+ downgraded.
+
+ See `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+ context.
+ """
+
+ # Find the point when we stopped writing to the `sliding_sync_joined_rooms` table
+ txn.execute(
+ """
+ SELECT event_stream_ordering
+ FROM sliding_sync_joined_rooms
+ ORDER BY event_stream_ordering DESC
+ LIMIT 1
+ """,
+ )
+
+ # If we have nothing written to the `sliding_sync_joined_rooms` table, there is
+ # nothing to clean up
+ row = cast(Optional[Tuple[int]], txn.fetchone())
+ max_stream_ordering_sliding_sync_joined_rooms_table = None
+ depends_on = None
+ if row is not None:
+ (max_stream_ordering_sliding_sync_joined_rooms_table,) = row
+
+ txn.execute(
+ """
+ SELECT room_id
+ FROM events
+ WHERE stream_ordering > ?
+ GROUP BY room_id
+ ORDER BY MAX(stream_ordering) ASC
+ """,
+ (max_stream_ordering_sliding_sync_joined_rooms_table,),
+ )
+
+ room_rows = txn.fetchall()
+ # No new events have been written to the `events` table since the last time we wrote
+ # to the `sliding_sync_joined_rooms` table so there is nothing to clean up. This is
+ # the expected normal scenario for people who have not downgraded their Synapse
+ # version.
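+ # `batch_iter` (from `synapse.util.iterutils`), used just below, yields
+ # fixed-size chunks; schematically:
+ #   list(batch_iter([1, 2, 3, 4, 5], 2)) == [(1, 2), (3, 4), (5,)]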
+ if not room_rows:
+ return
+
+ # 1000 is an arbitrary batch size with no testing
+ for chunk in batch_iter(room_rows, 1000):
+ # Handle updating the `sliding_sync_joined_rooms` table
+ #
+ # Clear out the stale data
+ DatabasePool.simple_delete_many_batch_txn(
+ txn,
+ table="sliding_sync_joined_rooms",
+ keys=("room_id",),
+ values=chunk,
+ )
+
+ # Update the `sliding_sync_joined_rooms_to_recalculate` table with the rooms
+ # that went stale and now need to be recalculated.
+ DatabasePool.simple_upsert_many_txn_native_upsert(
+ txn,
+ table="sliding_sync_joined_rooms_to_recalculate",
+ key_names=("room_id",),
+ key_values=chunk,
+ value_names=(),
+ # No value columns, therefore make a blank list so that the following
+ # zip() works correctly.
+ value_values=[() for x in range(len(chunk))],
+ )
+ else:
+ # Avoid adding the background updates when there is no data to run them on (if
+ # the homeserver has no rooms). The portdb script refuses to run with pending
+ # background updates and since we potentially add them every time the server
+ # starts, we add this check to allow the script to breathe.
+ txn.execute("SELECT 1 FROM local_current_membership LIMIT 1")
+ row = txn.fetchone()
+ if row is None:
+ # There are no rooms, so don't schedule the bg update.
+ return
+
+ # Re-run the `sliding_sync_joined_rooms_to_recalculate` prefill if there is
+ # nothing in the `sliding_sync_joined_rooms` table
+ DatabasePool.simple_upsert_txn_native_upsert(
+ txn,
+ table="background_updates",
+ keyvalues={
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+ },
+ values={},
+ # Only insert the row if it doesn't already exist. If it already exists,
+ # we're already working on it
+ insertion_values={
+ "progress_json": "{}",
+ },
+ )
+ depends_on = _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE
+
+ # Now kick off the background update to catch up with what we missed while Synapse
+ # was downgraded.
+ #
+ # We may need to catch up on everything if we have nothing written to the
+ # `sliding_sync_joined_rooms` table yet. This could happen if someone had zero rooms
+ # on their server (so the normal background update completes), downgraded their
+ # Synapse version, joined and created some new rooms, and upgraded again.
+ DatabasePool.simple_upsert_txn_native_upsert(
+ txn,
+ table="background_updates",
+ keyvalues={
+ "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE
+ },
+ values={},
+ # Only insert the row if it doesn't already exist. If it already exists, we will
+ # eventually fill in the rows we're trying to populate.
+ insertion_values={
+ # Empty progress is expected since it's not used for this background update.
+ "progress_json": "{}",
+ # Wait for the prefill to finish
+ "depends_on": depends_on,
+ },
+ )
+
+
+def _resolve_stale_data_in_sliding_sync_membership_snapshots_table(
+ txn: LoggingTransaction,
+) -> None:
+ """
+ Clears stale/out-of-date entries from the `sliding_sync_membership_snapshots` table
+ and kicks off the background update to catch up with what we missed while Synapse
+ was downgraded.
+
+ See `_resolve_stale_data_in_sliding_sync_tables()` description above for more
+ context.
+ """ + + # Find the point when we stopped writing to the `sliding_sync_membership_snapshots` table + txn.execute( + """ + SELECT event_stream_ordering + FROM sliding_sync_membership_snapshots + ORDER BY event_stream_ordering DESC + LIMIT 1 + """, + ) + + # If we have nothing written to the `sliding_sync_membership_snapshots` table, + # there is nothing to clean up + row = cast(Optional[Tuple[int]], txn.fetchone()) + max_stream_ordering_sliding_sync_membership_snapshots_table = None + if row is not None: + (max_stream_ordering_sliding_sync_membership_snapshots_table,) = row + + # XXX: Since `forgotten` is simply a flag on the `room_memberships` table that is + # set out-of-band, there is no way to tell whether it was set while Synapse was + # downgraded. The only thing the user can do is `/forget` again if they run into + # this. + # + # This only picks up changes to memberships. + txn.execute( + """ + SELECT user_id, room_id + FROM local_current_membership + WHERE event_stream_ordering > ? + ORDER BY event_stream_ordering ASC + """, + (max_stream_ordering_sliding_sync_membership_snapshots_table,), + ) + + membership_rows = txn.fetchall() + # No new events have been written to the `events` table since the last time we wrote + # to the `sliding_sync_membership_snapshots` table so there is nothing to clean up. + # This is the expected normal scenario for people who have not downgraded their + # Synapse version. + if not membership_rows: + return + + # 1000 is an arbitrary batch size with no testing + for chunk in batch_iter(membership_rows, 1000): + # Handle updating the `sliding_sync_membership_snapshots` table + # + DatabasePool.simple_delete_many_batch_txn( + txn, + table="sliding_sync_membership_snapshots", + keys=("user_id", "room_id"), + values=chunk, + ) + else: + # Avoid adding the background updates when there is no data to run them on (if + # the homeserver has no rooms). The portdb script refuses to run with pending + # background updates and since we potentially add them every time the server + # starts, we add this check for to allow the script to breath. + txn.execute("SELECT 1 FROM local_current_membership LIMIT 1") + row = txn.fetchone() + if row is None: + # There are no rooms, so don't schedule the bg update. + return + + # Now kick-off the background update to catch-up with what we missed while Synapse + # was downgraded. + # + # We may need to catch-up on everything if we have nothing written to the + # `sliding_sync_membership_snapshots` table yet. This could happen if someone had + # zero rooms on their server (so the normal background update completes), downgrade + # Synapse versions, join and create some new rooms, and upgrade again. + # + progress_json: JsonDict = {} + if max_stream_ordering_sliding_sync_membership_snapshots_table is not None: + progress_json["initial_phase"] = False + progress_json["last_event_stream_ordering"] = ( + max_stream_ordering_sliding_sync_membership_snapshots_table + ) + + DatabasePool.simple_upsert_txn_native_upsert( + txn, + table="background_updates", + keyvalues={ + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE + }, + values={}, + # Only insert the row if it doesn't already exist. If it already exists, we will + # eventually fill in the rows we're trying to populate. + insertion_values={ + "progress_json": json_encoder.encode(progress_json), + }, + ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 4d4877c4c3..3db4460f57 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py
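The sliding-sync background update above catches `DatabaseCorruptionError` (defined in the hunk below) and skips the affected room rather than aborting the whole update. A minimal sketch of that calling pattern; `fetch_state_or_skip` and `store` are hypothetical names, while the exception class and its attributes come from this diff:

```python
import logging

from synapse.storage.databases.main.events_worker import DatabaseCorruptionError

logger = logging.getLogger(__name__)


async def fetch_state_or_skip(store, event_ids):
    """Fetch events, treating a corrupt room as 'no state' instead of failing."""
    try:
        return await store.get_events(event_ids)
    except DatabaseCorruptionError as e:
        # The exception carries enough context to identify the bad row.
        logger.warning(
            "Corrupt event %s (computed %s) in room %s; skipping",
            e.persisted_event_id,
            e.computed_event_id,
            e.room_id,
        )
        return None
```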
@@ -30,6 +30,7 @@ from typing import (
Dict,
Iterable,
List,
+ Literal,
Mapping,
MutableMapping,
Optional,
@@ -41,7 +42,6 @@ from typing import (
import attr
from prometheus_client import Gauge
-from typing_extensions import Literal
from twisted.internet import defer
@@ -61,7 +61,13 @@ from synapse.logging.context import (
current_context,
make_deferred_yieldable,
)
-from synapse.logging.opentracing import start_active_span, tag_args, trace
+from synapse.logging.opentracing import (
+ SynapseTags,
+ set_tag,
+ start_active_span,
+ tag_args,
+ trace,
+)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
@@ -83,6 +89,7 @@ from synapse.storage.util.id_generators import (
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
from synapse.types.state import StateFilter
+from synapse.types.storage import _BackgroundUpdates
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
from synapse.util.caches.descriptors import cached, cachedList
@@ -98,6 +105,26 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
+class DatabaseCorruptionError(RuntimeError):
+ """We found an event in the DB that has a persisted event ID that doesn't
+ match its computed event ID."""
+
+ def __init__(
+ self, room_id: str, persisted_event_id: str, computed_event_id: str
+ ) -> None:
+ self.room_id = room_id
+ self.persisted_event_id = persisted_event_id
+ self.computed_event_id = computed_event_id
+
+ message = (
+ f"Database corruption: Event {persisted_event_id} in room {room_id} "
+ f"from the database appears to have been modified (calculated "
+ f"event id {computed_event_id})"
+ )
+
+ super().__init__(message)
+
+
# These values are used in the `enqueue_event` and `_fetch_loop` methods to
# control how we batch/bulk fetch events from the database.
# The values are plucked out of thin air to make initial sync run faster
@@ -166,6 +193,14 @@ class _EventRow:
outlier: bool
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class EventMetadata:
+ """Event metadata returned by `get_metadata_for_event(..)`"""
+
+ sender: str
+ received_ts: int
+
+
class EventRedactBehaviour(Enum):
"""
What to do when retrieving a redacted event from the database.
@@ -304,6 +339,16 @@ class EventsWorkerStore(SQLBaseStore):
writers=["master"],
)
+ # Added to accommodate some queries for the admin API in order to fetch/filter
+ # membership events by when they were received
+ self.db_pool.updates.register_background_index_update(
+ update_name="events_received_ts_index",
+ index_name="received_ts_idx",
+ table="events",
+ columns=("received_ts",),
+ where_clause="type = 'm.room.member'",
+ )
+
def get_un_partial_stated_events_token(self, instance_name: str) -> int:
return (
self._un_partial_stated_events_stream_id_gen.get_current_token_for_writer(
@@ -457,6 +502,8 @@ class EventsWorkerStore(SQLBaseStore):
) -> Optional[EventBase]:
"""Get an event from the database by event_id.
+ Events for unknown room versions will also be filtered out.
+
Args:
event_id: The event_id of the event to fetch
@@ -502,6 +549,7 @@ class EventsWorkerStore(SQLBaseStore):
return event
+ @trace
async def get_events(
self,
event_ids: Collection[str],
@@ -511,6 +559,10 @@ class EventsWorkerStore(SQLBaseStore):
) -> Dict[str, EventBase]:
"""Get events from the database
+ Unknown events will be omitted from the response.
+ + Events for unknown room versions will also be filtered out. + Args: event_ids: The event_ids of the events to fetch @@ -529,6 +581,11 @@ class EventsWorkerStore(SQLBaseStore): Returns: A mapping from event_id to event. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + events = await self.get_events_as_list( event_ids, redact_behaviour=redact_behaviour, @@ -553,6 +610,8 @@ class EventsWorkerStore(SQLBaseStore): Unknown events will be omitted from the response. + Events for unknown room versions will also be filtered out. + Args: event_ids: The event_ids of the events to fetch @@ -574,6 +633,10 @@ class EventsWorkerStore(SQLBaseStore): Note that the returned list may be smaller than the list of event IDs if not all events could be fetched. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) if not event_ids: return [] @@ -694,10 +757,11 @@ class EventsWorkerStore(SQLBaseStore): return events + @trace @cancellable async def get_unredacted_events_from_cache_or_db( self, - event_ids: Iterable[str], + event_ids: Collection[str], allow_rejected: bool = False, ) -> Dict[str, EventCacheEntry]: """Fetch a bunch of events from the cache or the database. @@ -719,6 +783,11 @@ class EventsWorkerStore(SQLBaseStore): Returns: map from event id to result """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + # Shortcut: check if we have any events in the *in memory* cache - this function # may be called repeatedly for the same event so at this point we cannot reach # out to any external cache for performance reasons. The external cache is @@ -755,9 +824,9 @@ class EventsWorkerStore(SQLBaseStore): if missing_events_ids: - async def get_missing_events_from_cache_or_db() -> ( - Dict[str, EventCacheEntry] - ): + async def get_missing_events_from_cache_or_db() -> Dict[ + str, EventCacheEntry + ]: """Fetches the events in `missing_event_ids` from the database. Also creates entries in `self._current_event_fetches` to allow @@ -907,7 +976,7 @@ class EventsWorkerStore(SQLBaseStore): events, update_metrics=update_metrics ) - missing_event_ids = (e for e in events if e not in event_map) + missing_event_ids = [e for e in events if e not in event_map] event_map.update( await self._get_events_from_external_cache( events=missing_event_ids, @@ -917,8 +986,9 @@ class EventsWorkerStore(SQLBaseStore): return event_map + @trace async def _get_events_from_external_cache( - self, events: Iterable[str], update_metrics: bool = True + self, events: Collection[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: """Fetch events from any configured external cache. @@ -928,6 +998,10 @@ class EventsWorkerStore(SQLBaseStore): events: list of event_ids to fetch update_metrics: Whether to update the cache hit ratio metrics """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "events.length", + str(len(events)), + ) event_map = {} for event_id in events: @@ -1193,6 +1267,7 @@ class EventsWorkerStore(SQLBaseStore): with PreserveLoggingContext(): self.hs.get_reactor().callFromThread(fire_errback, e) + @trace async def _get_events_from_db( self, event_ids: Collection[str] ) -> Dict[str, EventCacheEntry]: @@ -1211,6 +1286,11 @@ class EventsWorkerStore(SQLBaseStore): map from event id to result. May return extra events which weren't asked for. 
""" + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + fetched_event_ids: Set[str] = set() fetched_events: Dict[str, _EventRow] = {} @@ -1356,10 +1436,8 @@ class EventsWorkerStore(SQLBaseStore): if original_ev.event_id != event_id: # it's difficult to see what to do here. Pretty much all bets are off # if Synapse cannot rely on the consistency of its database. - raise RuntimeError( - f"Database corruption: Event {event_id} in room {d['room_id']} " - f"from the database appears to have been modified (calculated " - f"event id {original_ev.event_id})" + raise DatabaseCorruptionError( + d["room_id"], event_id, original_ev.event_id ) event_map[event_id] = original_ev @@ -1639,7 +1717,7 @@ class EventsWorkerStore(SQLBaseStore): txn.database_engine, "e.event_id", event_ids ) txn.execute(sql + clause, args) - found_events = {eid for eid, in txn} + found_events = {eid for (eid,) in txn} # ... and then we can update the results for each key return {eid: (eid in found_events) for eid in event_ids} @@ -1838,9 +1916,9 @@ class EventsWorkerStore(SQLBaseStore): " LIMIT ?" ) txn.execute(sql, (-last_id, -current_id, instance_name, limit)) - new_event_updates: List[Tuple[int, Tuple[str, str, str, str, str, str]]] = ( - [] - ) + new_event_updates: List[ + Tuple[int, Tuple[str, str, str, str, str, str]] + ] = [] row: Tuple[int, str, str, str, str, str, str] # Type safety: iterating over `txn` yields `Tuple`, i.e. # `Tuple[Any, ...]` of arbitrary length. Mypy detects assigning a @@ -2439,3 +2517,141 @@ class EventsWorkerStore(SQLBaseStore): ) self.invalidate_get_event_cache_after_txn(txn, event_id) + + async def get_events_sent_by_user_in_room( + self, user_id: str, room_id: str, limit: int, filter: Optional[List[str]] = None + ) -> Optional[List[str]]: + """ + Get a list of event ids of events sent by the user in the specified room + + Args: + user_id: user ID to search against + room_id: room ID of the room to search for events in + filter: type of events to filter for + limit: maximum number of event ids to return + """ + + def _get_events_by_user_in_room_txn( + txn: LoggingTransaction, + user_id: str, + room_id: str, + filter: Optional[List[str]], + batch_size: int, + offset: int, + ) -> Tuple[Optional[List[str]], int]: + if filter: + base_clause, args = make_in_list_sql_clause( + txn.database_engine, "type", filter + ) + clause = f"AND {base_clause}" + parameters = (user_id, room_id, *args, batch_size, offset) + else: + clause = "" + parameters = (user_id, room_id, batch_size, offset) + + sql = f""" + SELECT event_id FROM events + WHERE sender = ? AND room_id = ? + {clause} + ORDER BY received_ts DESC + LIMIT ? + OFFSET ? 
+ """ + txn.execute(sql, parameters) + res = txn.fetchall() + if res: + events = [row[0] for row in res] + else: + events = None + + return events, offset + batch_size + + offset = 0 + batch_size = 100 + if batch_size > limit: + batch_size = limit + + selected_ids: List[str] = [] + while offset < limit: + res, offset = await self.db_pool.runInteraction( + "get_events_by_user", + _get_events_by_user_in_room_txn, + user_id, + room_id, + filter, + batch_size, + offset, + ) + if res: + selected_ids = selected_ids + res + else: + break + return selected_ids + + async def have_finished_sliding_sync_background_jobs(self) -> bool: + """Return if it's safe to use the sliding sync membership tables.""" + + return await self.db_pool.updates.have_completed_background_updates( + ( + _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + ) + ) + + async def get_sent_invite_count_by_user(self, user_id: str, from_ts: int) -> int: + """ + Get the number of invites sent by the given user at or after the provided timestamp. + + Args: + user_id: user ID to search against + from_ts: a timestamp in milliseconds from the unix epoch. Filters against + `events.received_ts` + + """ + + def _get_sent_invite_count_by_user_txn( + txn: LoggingTransaction, user_id: str, from_ts: int + ) -> int: + sql = """ + SELECT COUNT(rm.event_id) + FROM room_memberships AS rm + INNER JOIN events AS e USING(event_id) + WHERE rm.sender = ? + AND rm.membership = 'invite' + AND e.type = 'm.room.member' + AND e.received_ts >= ? + """ + + txn.execute(sql, (user_id, from_ts)) + res = txn.fetchone() + + if res is None: + return 0 + return int(res[0]) + + return await self.db_pool.runInteraction( + "_get_sent_invite_count_by_user_txn", + _get_sent_invite_count_by_user_txn, + user_id, + from_ts, + ) + + @cached(tree=True) + async def get_metadata_for_event( + self, room_id: str, event_id: str + ) -> Optional[EventMetadata]: + row = await self.db_pool.simple_select_one( + table="events", + keyvalues={"room_id": room_id, "event_id": event_id}, + retcols=("sender", "received_ts"), + allow_none=True, + desc="get_metadata_for_event", + ) + if row is None: + return None + + return EventMetadata( + sender=row[0], + received_ts=row[1], + ) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 7617fd3ad4..04866524e3 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py
@@ -19,6 +19,7 @@ # [This file includes modifications made by New Vector Limited] # # +import logging from enum import Enum from typing import ( TYPE_CHECKING, @@ -51,6 +52,8 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = ( "media_repository_drop_index_wo_method_2" ) +logger = logging.getLogger(__name__) + @attr.s(slots=True, frozen=True, auto_attribs=True) class LocalMedia: @@ -65,6 +68,7 @@ class LocalMedia: safe_from_quarantine: bool user_id: Optional[str] authenticated: Optional[bool] + sha256: Optional[str] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -79,6 +83,7 @@ class RemoteMedia: last_access_ts: int quarantined_by: Optional[str] authenticated: Optional[bool] + sha256: Optional[str] @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -154,6 +159,26 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): unique=True, ) + self.db_pool.updates.register_background_index_update( + update_name="local_media_repository_sha256_idx", + index_name="local_media_repository_sha256", + table="local_media_repository", + where_clause="sha256 IS NOT NULL", + columns=[ + "sha256", + ], + ) + + self.db_pool.updates.register_background_index_update( + update_name="remote_media_cache_sha256_idx", + index_name="remote_media_cache_sha256", + table="remote_media_cache", + where_clause="sha256 IS NOT NULL", + columns=[ + "sha256", + ], + ) + self.db_pool.updates.register_background_update_handler( BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2, self._drop_media_index_without_method, @@ -221,6 +246,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "safe_from_quarantine", "user_id", "authenticated", + "sha256", ), allow_none=True, desc="get_local_media", @@ -239,6 +265,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): safe_from_quarantine=row[7], user_id=row[8], authenticated=row[9], + sha256=row[10], ) async def get_local_media_by_user_paginate( @@ -295,7 +322,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): quarantined_by, safe_from_quarantine, user_id, - authenticated + authenticated, + sha256 FROM local_media_repository WHERE user_id = ? ORDER BY {order_by_column} {order}, media_id ASC @@ -320,6 +348,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): safe_from_quarantine=bool(row[8]), user_id=row[9], authenticated=row[10], + sha256=row[11], ) for row in txn ] @@ -449,6 +478,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): media_length: int, user_id: UserID, url_cache: Optional[str] = None, + sha256: Optional[str] = None, + quarantined_by: Optional[str] = None, ) -> None: if self.hs.config.media.enable_authenticated_media: authenticated = True @@ -466,6 +497,8 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): "user_id": user_id.to_string(), "url_cache": url_cache, "authenticated": authenticated, + "sha256": sha256, + "quarantined_by": quarantined_by, }, desc="store_local_media", ) @@ -477,20 +510,28 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): upload_name: Optional[str], media_length: int, user_id: UserID, + sha256: str, url_cache: Optional[str] = None, + quarantined_by: Optional[str] = None, ) -> None: + updatevalues = { + "media_type": media_type, + "upload_name": upload_name, + "media_length": media_length, + "url_cache": url_cache, + "sha256": sha256, + } + + # This should never be un-set by this function. 
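+ # (We only add `quarantined_by` to `updatevalues` when it is set, so
+ # passing `None` leaves any existing quarantine marker untouched.)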
+ if quarantined_by is not None:
+ updatevalues["quarantined_by"] = quarantined_by
+
await self.db_pool.simple_update_one(
"local_media_repository",
keyvalues={
- "user_id": user_id.to_string(),
"media_id": media_id,
},
- updatevalues={
- "media_type": media_type,
- "upload_name": upload_name,
- "media_length": media_length,
- "url_cache": url_cache,
- },
+ updatevalues=updatevalues,
desc="update_local_media",
)
@@ -657,6 +698,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"last_access_ts",
"quarantined_by",
"authenticated",
+ "sha256",
),
allow_none=True,
desc="get_cached_remote_media",
@@ -674,6 +716,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
last_access_ts=row[5],
quarantined_by=row[6],
authenticated=row[7],
+ sha256=row[8],
)
async def store_cached_remote_media(
@@ -685,6 +728,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
time_now_ms: int,
upload_name: Optional[str],
filesystem_id: str,
+ sha256: Optional[str],
) -> None:
if self.hs.config.media.enable_authenticated_media:
authenticated = True
@@ -703,6 +747,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
"filesystem_id": filesystem_id,
"last_access_ts": time_now_ms,
"authenticated": authenticated,
+ "sha256": sha256,
},
desc="store_cached_remote_media",
)
@@ -729,10 +774,10 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
txn.execute_batch(
sql,
- (
+ [
(time_ms, media_origin, media_id)
for media_origin, media_id in remote_media
- ),
+ ],
)
sql = (
@@ -740,7 +785,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
" WHERE media_id = ?"
)
- txn.execute_batch(sql, ((time_ms, media_id) for media_id in local_media))
+ txn.execute_batch(sql, [(time_ms, media_id) for media_id in local_media])
await self.db_pool.runInteraction(
"update_cached_last_access_time", update_cache_txn
@@ -946,3 +991,46 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
await self.db_pool.runInteraction(
"delete_url_cache_media", _delete_url_cache_media_txn
)
+
+ async def get_is_hash_quarantined(self, sha256: str) -> bool:
+ """Get whether a specific sha256 hash digest matches any quarantined media.
+
+ Returns:
+ True if the hash matches at least one piece of quarantined media
+ (local or remote); False otherwise, including while the supporting
+ indexes are still being built.
+ """
+
+ # If we don't have the index yet, performance tanks, so we return False.
+ # In the background updates, remote_media_cache_sha256_idx is created
+ # after local_media_repository_sha256_idx, which is why we only need to
+ # check for the completion of the former.
+ if not await self.db_pool.updates.has_completed_background_update(
+ "remote_media_cache_sha256_idx"
+ ):
+ return False
+
+ def get_matching_media_txn(
+ txn: LoggingTransaction, table: str, sha256: str
+ ) -> bool:
+ # Return on first match
+ sql = """
+ SELECT 1
+ FROM local_media_repository
+ WHERE sha256 = ? AND quarantined_by IS NOT NULL
+
+ UNION ALL
+
+ SELECT 1
+ FROM remote_media_cache
+ WHERE sha256 = ? AND quarantined_by IS NOT NULL
+ LIMIT 1
+ """
+ txn.execute(sql, (sha256, sha256))
+ row = txn.fetchone()
+ return row is not None
+
+ return await self.db_pool.runInteraction(
+ "get_matching_media_txn",
+ get_matching_media_txn,
+ "local_media_repository",
+ sha256,
+ )
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index 8e948c5e8d..c384675839 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -29,7 +29,6 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.util.caches.descriptors import cached -from synapse.util.threepids import canonicalise_email if TYPE_CHECKING: from synapse.server import HomeServer @@ -65,18 +64,6 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): self._mau_stats_only = hs.config.server.mau_stats_only - if self._update_on_this_worker: - # Do not add more reserved users than the total allowable number - self.db_pool.new_transaction( - db_conn, - "initialise_mau_threepids", - [], - [], - [], - self._initialise_reserved_users, - hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value], - ) - @cached(num_args=0) async def get_monthly_active_count(self) -> int: """Generates current count of monthly active users @@ -174,26 +161,6 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): return await self.db_pool.runInteraction("list_users", _list_users) - async def get_registered_reserved_users(self) -> List[str]: - """Of the reserved threepids defined in config, retrieve those that are associated - with registered users - - Returns: - User IDs of actual users that are reserved - """ - users = [] - - for tp in self.hs.config.server.mau_limits_reserved_threepids[ - : self.hs.config.server.max_mau_value - ]: - user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( - tp["medium"], canonicalise_email(tp["address"]) - ) - if user_id: - users.append(user_id) - - return users - @cached(num_args=1) async def user_last_seen_monthly_active(self, user_id: str) -> Optional[int]: """ @@ -289,50 +256,10 @@ class MonthlyActiveUsersWorkerStore(RegistrationWorkerStore): ) self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) - reserved_users = await self.get_registered_reserved_users() await self.db_pool.runInteraction( - "reap_monthly_active_users", _reap_users, reserved_users + "reap_monthly_active_users", _reap_users, [] ) - def _initialise_reserved_users( - self, txn: LoggingTransaction, threepids: List[dict] - ) -> None: - """Ensures that reserved threepids are accounted for in the MAU table, should - be called on start up. - - Args: - txn: - threepids: List of threepid dicts to reserve - """ - assert ( - self._update_on_this_worker - ), "This worker is not designated to update MAUs" - - # XXX what is this function trying to achieve? It upserts into - # monthly_active_users for each *registered* reserved mau user, but why? - # - # - shouldn't there already be an entry for each reserved user (at least - # if they have been active recently)? - # - # - if it's important that the timestamp is kept up to date, why do we only - # run this at startup? 
-
- for tp in threepids:
- user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"])
-
- if user_id:
- is_support = self.is_support_user_txn(txn, user_id)
- if not is_support:
- # We do this manually here to avoid hitting https://github.com/matrix-org/synapse/issues/6791
- self.db_pool.simple_upsert_txn(
- txn,
- table="monthly_active_users",
- keyvalues={"user_id": user_id},
- values={"timestamp": int(self._clock.time_msec())},
- )
- else:
- logger.warning("mau limit reserved threepid %s not found in db" % tp)
-
async def upsert_monthly_active_user(self, user_id: str) -> None:
"""Updates or inserts the user into the monthly active user table, which
is used to track the current MAU usage of the server
@@ -340,9 +267,9 @@
Args:
user_id: user to add/update
"""
- assert (
- self._update_on_this_worker
- ), "This worker is not designated to update MAUs"
+ assert self._update_on_this_worker, (
+ "This worker is not designated to update MAUs"
+ )
# Support user never to be included in MAU stats. Note I can't easily call this
# from upsert_monthly_active_user_txn because then I need a _txn form of
@@ -379,9 +306,9 @@
txn:
user_id: user to add/update
"""
- assert (
- self._update_on_this_worker
- ), "This worker is not designated to update MAUs"
+ assert self._update_on_this_worker, (
+ "This worker is not designated to update MAUs"
+ )
# Am consciously deciding to lock the table on the basis that it ought
# never be a big table and alternative approaches (batching multiple
@@ -409,9 +336,9 @@
Args:
user_id: the user_id to query
"""
- assert (
- self._update_on_this_worker
- ), "This worker is not designated to update MAUs"
+ assert self._update_on_this_worker, (
+ "This worker is not designated to update MAUs"
+ )
if self._limit_usage_by_mau or self._mau_stats_only:
# Trial users and guests should not be included as part of MAU group
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index 996aea808d..30d8a58d96 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py
@@ -18,8 +18,13 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING, Optional +import json +from typing import TYPE_CHECKING, Dict, Optional, Tuple, cast +from canonicaljson import encode_canonical_json + +from synapse.api.constants import ProfileFields +from synapse.api.errors import Codes, StoreError from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -27,13 +32,17 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.databases.main.roommember import ProfileInfo -from synapse.storage.engines import PostgresEngine -from synapse.types import JsonDict, UserID +from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.types import JsonDict, JsonValue, UserID if TYPE_CHECKING: from synapse.server import HomeServer +# The number of bytes that the serialized profile can have. +MAX_PROFILE_SIZE = 65536 + + class ProfileWorkerStore(SQLBaseStore): def __init__( self, @@ -144,6 +153,16 @@ class ProfileWorkerStore(SQLBaseStore): return 50 async def get_profileinfo(self, user_id: UserID) -> ProfileInfo: + """ + Fetch the display name and avatar URL of a user. + + Args: + user_id: The user ID to fetch the profile for. + + Returns: + The user's display name and avatar URL. Values may be null if unset + or if the user doesn't exist. + """ profile = await self.db_pool.simple_select_one( table="profiles", keyvalues={"full_user_id": user_id.to_string()}, @@ -158,6 +177,15 @@ class ProfileWorkerStore(SQLBaseStore): return ProfileInfo(avatar_url=profile[1], display_name=profile[0]) async def get_profile_displayname(self, user_id: UserID) -> Optional[str]: + """ + Fetch the display name of a user. + + Args: + user_id: The user to get the display name for. + + Raises: + 404 if the user does not exist. + """ return await self.db_pool.simple_select_one_onecol( table="profiles", keyvalues={"full_user_id": user_id.to_string()}, @@ -166,6 +194,15 @@ class ProfileWorkerStore(SQLBaseStore): ) async def get_profile_avatar_url(self, user_id: UserID) -> Optional[str]: + """ + Fetch the avatar URL of a user. + + Args: + user_id: The user to get the avatar URL for. + + Raises: + 404 if the user does not exist. + """ return await self.db_pool.simple_select_one_onecol( table="profiles", keyvalues={"full_user_id": user_id.to_string()}, @@ -173,7 +210,96 @@ class ProfileWorkerStore(SQLBaseStore): desc="get_profile_avatar_url", ) + async def get_profile_field(self, user_id: UserID, field_name: str) -> JsonValue: + """ + Get a custom profile field for a user. + + Args: + user_id: The user's ID. + field_name: The custom profile field name. + + Returns: + The string value if the field exists, otherwise raises 404. + """ + + def get_profile_field(txn: LoggingTransaction) -> JsonValue: + # This will error if field_name has double quotes in it, but that's not + # possible due to the grammar. + field_path = f'$."{field_name}"' + + if isinstance(self.database_engine, PostgresEngine): + sql = """ + SELECT JSONB_PATH_EXISTS(fields, ?), JSONB_EXTRACT_PATH(fields, ?) + FROM profiles + WHERE user_id = ? + """ + txn.execute( + sql, + (field_path, field_name, user_id.localpart), + ) + + # Test exists first since value being None is used for both + # missing and a null JSON value. + exists, value = cast(Tuple[bool, JsonValue], txn.fetchone()) + if not exists: + raise StoreError(404, "No row found") + return value + + else: + sql = """ + SELECT JSON_TYPE(fields, ?), JSON_EXTRACT(fields, ?) 
+ FROM profiles + WHERE user_id = ? + """ + txn.execute( + sql, + (field_path, field_path, user_id.localpart), + ) + + # If value_type is None, then the value did not exist. + value_type, value = cast( + Tuple[Optional[str], JsonValue], txn.fetchone() + ) + if not value_type: + raise StoreError(404, "No row found") + # If value_type is object or array, then need to deserialize the JSON. + # Scalar values are properly returned directly. + if value_type in ("object", "array"): + assert isinstance(value, str) + return json.loads(value) + return value + + return await self.db_pool.runInteraction("get_profile_field", get_profile_field) + + async def get_profile_fields(self, user_id: UserID) -> Dict[str, str]: + """ + Get all custom profile fields for a user. + + Args: + user_id: The user's ID. + + Returns: + A dictionary of custom profile fields. + """ + result = await self.db_pool.simple_select_one_onecol( + table="profiles", + keyvalues={"full_user_id": user_id.to_string()}, + retcol="fields", + desc="get_profile_fields", + ) + # The SQLite driver doesn't automatically convert JSON to + # Python objects + if isinstance(self.database_engine, Sqlite3Engine) and result: + result = json.loads(result) + return result or {} + async def create_profile(self, user_id: UserID) -> None: + """ + Create a blank profile for a user. + + Args: + user_id: The user to create the profile for. + """ user_localpart = user_id.localpart await self.db_pool.simple_insert( table="profiles", @@ -181,6 +307,71 @@ class ProfileWorkerStore(SQLBaseStore): desc="create_profile", ) + def _check_profile_size( + self, + txn: LoggingTransaction, + user_id: UserID, + new_field_name: str, + new_value: JsonValue, + ) -> None: + # For each entry there are 4 quotes (2 each for key and value), 1 colon, + # and 1 comma. + PER_VALUE_EXTRA = 6 + + # Add the size of the current custom profile fields, ignoring the entry + # which will be overwritten. + if isinstance(txn.database_engine, PostgresEngine): + size_sql = """ + SELECT + OCTET_LENGTH((fields - ?)::text), OCTET_LENGTH(displayname), OCTET_LENGTH(avatar_url) + FROM profiles + WHERE + user_id = ? + """ + txn.execute( + size_sql, + (new_field_name, user_id.localpart), + ) + else: + size_sql = """ + SELECT + LENGTH(json_remove(fields, ?)), LENGTH(displayname), LENGTH(avatar_url) + FROM profiles + WHERE + user_id = ? + """ + txn.execute( + size_sql, + # This will error if field_name has double quotes in it, but that's not + # possible due to the grammar. + (f'$."{new_field_name}"', user_id.localpart), + ) + row = cast(Tuple[Optional[int], Optional[int], Optional[int]], txn.fetchone()) + + # The values return null if the column is null. + total_bytes = ( + # Discount the opening and closing braces to avoid double counting, + # but add one for a comma. + # -2 + 1 = -1 + (row[0] - 1 if row[0] else 0) + + ( + row[1] + len("displayname") + PER_VALUE_EXTRA + if new_field_name != ProfileFields.DISPLAYNAME and row[1] + else 0 + ) + + ( + row[2] + len("avatar_url") + PER_VALUE_EXTRA + if new_field_name != ProfileFields.AVATAR_URL and row[2] + else 0 + ) + ) + + # Add the length of the field being added + the braces. + total_bytes += len(encode_canonical_json({new_field_name: new_value})) + + if total_bytes > MAX_PROFILE_SIZE: + raise StoreError(400, "Profile too large", Codes.PROFILE_TOO_LARGE) + async def set_profile_displayname( self, user_id: UserID, new_displayname: Optional[str] ) -> None: @@ -193,14 +384,25 @@ class ProfileWorkerStore(SQLBaseStore): name is removed. 
""" user_localpart = user_id.localpart - await self.db_pool.simple_upsert( - table="profiles", - keyvalues={"user_id": user_localpart}, - values={ - "displayname": new_displayname, - "full_user_id": user_id.to_string(), - }, - desc="set_profile_displayname", + + def set_profile_displayname(txn: LoggingTransaction) -> None: + if new_displayname is not None: + self._check_profile_size( + txn, user_id, ProfileFields.DISPLAYNAME, new_displayname + ) + + self.db_pool.simple_upsert_txn( + txn, + table="profiles", + keyvalues={"user_id": user_localpart}, + values={ + "displayname": new_displayname, + "full_user_id": user_id.to_string(), + }, + ) + + await self.db_pool.runInteraction( + "set_profile_displayname", set_profile_displayname ) async def set_profile_avatar_url( @@ -215,13 +417,125 @@ class ProfileWorkerStore(SQLBaseStore): removed. """ user_localpart = user_id.localpart - await self.db_pool.simple_upsert( - table="profiles", - keyvalues={"user_id": user_localpart}, - values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()}, - desc="set_profile_avatar_url", + + def set_profile_avatar_url(txn: LoggingTransaction) -> None: + if new_avatar_url is not None: + self._check_profile_size( + txn, user_id, ProfileFields.AVATAR_URL, new_avatar_url + ) + + self.db_pool.simple_upsert_txn( + txn, + table="profiles", + keyvalues={"user_id": user_localpart}, + values={ + "avatar_url": new_avatar_url, + "full_user_id": user_id.to_string(), + }, + ) + + await self.db_pool.runInteraction( + "set_profile_avatar_url", set_profile_avatar_url ) + async def set_profile_field( + self, user_id: UserID, field_name: str, new_value: JsonValue + ) -> None: + """ + Set a custom profile field for a user. + + Args: + user_id: The user's ID. + field_name: The name of the custom profile field. + new_value: The value of the custom profile field. + """ + + # Encode to canonical JSON. + canonical_value = encode_canonical_json(new_value) + + def set_profile_field(txn: LoggingTransaction) -> None: + self._check_profile_size(txn, user_id, field_name, new_value) + + if isinstance(self.database_engine, PostgresEngine): + from psycopg2.extras import Json + + # Note that the || jsonb operator is not recursive, any duplicate + # keys will be taken from the second value. + sql = """ + INSERT INTO profiles (user_id, full_user_id, fields) VALUES (?, ?, JSON_BUILD_OBJECT(?, ?::jsonb)) + ON CONFLICT (user_id) + DO UPDATE SET full_user_id = EXCLUDED.full_user_id, fields = COALESCE(profiles.fields, '{}'::jsonb) || EXCLUDED.fields + """ + + txn.execute( + sql, + ( + user_id.localpart, + user_id.to_string(), + field_name, + # Pass as a JSON object since we have passing bytes disabled + # at the database driver. + Json(json.loads(canonical_value)), + ), + ) + else: + # You may be tempted to use json_patch instead of providing the parameters + # twice, but that recursively merges objects instead of replacing. + sql = """ + INSERT INTO profiles (user_id, full_user_id, fields) VALUES (?, ?, JSON_OBJECT(?, JSON(?))) + ON CONFLICT (user_id) + DO UPDATE SET full_user_id = EXCLUDED.full_user_id, fields = JSON_SET(COALESCE(profiles.fields, '{}'), ?, JSON(?)) + """ + # This will error if field_name has double quotes in it, but that's not + # possible due to the grammar. 
+ json_field_name = f'$."{field_name}"' + + txn.execute( + sql, + ( + user_id.localpart, + user_id.to_string(), + json_field_name, + canonical_value, + json_field_name, + canonical_value, + ), + ) + + await self.db_pool.runInteraction("set_profile_field", set_profile_field) + + async def delete_profile_field(self, user_id: UserID, field_name: str) -> None: + """ + Remove a custom profile field for a user. + + Args: + user_id: The user's ID. + field_name: The name of the custom profile field. + """ + + def delete_profile_field(txn: LoggingTransaction) -> None: + if isinstance(self.database_engine, PostgresEngine): + sql = """ + UPDATE profiles SET fields = fields - ? + WHERE user_id = ? + """ + txn.execute( + sql, + (field_name, user_id.localpart), + ) + else: + sql = """ + UPDATE profiles SET fields = json_remove(fields, ?) + WHERE user_id = ? + """ + txn.execute( + sql, + # This will error if field_name has double quotes in it. + (f'$."{field_name}"', user_id.localpart), + ) + + await self.db_pool.runInteraction("delete_profile_field", delete_profile_field) + class ProfileStore(ProfileWorkerStore): pass diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 3b81ed943c..a11f522f03 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py
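The profile.py changes above cap the serialized profile at MAX_PROFILE_SIZE bytes of canonical JSON. A minimal sketch of that accounting, using the canonicaljson package the patch imports; the helper name and sample values are hypothetical, and the real store also counts the displayname and avatar_url columns:

    from canonicaljson import encode_canonical_json

    MAX_PROFILE_SIZE = 65536  # same cap as in the patch

    def profile_would_fit(fields: dict, field_name: str, new_value) -> bool:
        # Overwrite (not merge) the field, then measure the canonical encoding,
        # roughly how the store sizes a profile before accepting a write.
        candidate = dict(fields)
        candidate[field_name] = new_value
        return len(encode_canonical_json(candidate)) <= MAX_PROFILE_SIZE

    assert profile_would_fit({}, "m.example", "short")
    assert not profile_would_fit({}, "m.example", "x" * 70000)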
@@ -20,7 +20,7 @@ # import logging -from typing import Any, List, Set, Tuple, cast +from typing import Any, Set, Tuple, cast from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction @@ -199,9 +199,8 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # Update backward extremeties txn.execute_batch( - "INSERT INTO event_backward_extremities (room_id, event_id)" - " VALUES (?, ?)", - [(room_id, event_id) for event_id, in new_backwards_extrems], + "INSERT INTO event_backward_extremities (room_id, event_id) VALUES (?, ?)", + [(room_id, event_id) for (event_id,) in new_backwards_extrems], ) logger.info("[purge] finding state groups referenced by deleted events") @@ -215,7 +214,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): """ ) - referenced_state_groups = {sg for sg, in txn} + referenced_state_groups = {sg for (sg,) in txn} logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups) ) @@ -332,7 +331,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): return referenced_state_groups - async def purge_room(self, room_id: str) -> List[int]: + async def purge_room(self, room_id: str) -> None: """Deletes all record of a room Args: @@ -348,7 +347,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # purge any of those rows which were added during the first. logger.info("[purge] Starting initial main purge of [1/2]") - state_groups_to_delete = await self.db_pool.runInteraction( + await self.db_pool.runInteraction( "purge_room", self._purge_room_txn, room_id=room_id, @@ -356,18 +355,15 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): ) logger.info("[purge] Starting secondary main purge of [2/2]") - state_groups_to_delete.extend( - await self.db_pool.runInteraction( - "purge_room", - self._purge_room_txn, - room_id=room_id, - ), + await self.db_pool.runInteraction( + "purge_room", + self._purge_room_txn, + room_id=room_id, ) - logger.info("[purge] Done with main purge") - return state_groups_to_delete + logger.info("[purge] Done with main purge") - def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: + def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> None: # This collides with event persistence so we cannot write new events and metadata into # a room while deleting it or this transaction will fail. if isinstance(self.database_engine, PostgresEngine): @@ -376,18 +372,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): (room_id,), ) - # First, fetch all the state groups that should be deleted, before - # we delete that information. - txn.execute( - """ - SELECT DISTINCT state_group FROM events - INNER JOIN event_to_state_groups USING(event_id) - WHERE events.room_id = ? - """, - (room_id,), - ) - - state_groups = [row[0] for row in txn] + if isinstance(self.database_engine, PostgresEngine): + # Disable statement timeouts for this transaction; purging rooms can + # take a while! + txn.execute("SET LOCAL statement_timeout = 0") # Get all the auth chains that are referenced by events that are to be # deleted. @@ -454,6 +442,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # so must be deleted first. "local_current_membership", "room_memberships", + # Note: the sliding_sync_ tables have foreign keys to the `events` table + # so must be deleted first. 
+ "sliding_sync_joined_rooms", + "sliding_sync_membership_snapshots", "events", "federation_inbound_events_staging", "receipts_graph", @@ -504,5 +496,3 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # periodically anyway (https://github.com/matrix-org/synapse/issues/5888) self._invalidate_caches_for_room_and_stream(txn, room_id) - - return state_groups diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index bbdde17711..86c87f78bf 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py
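The purge_events.py hunk above switches to iterating cursors as 1-tuples, e.g. `{sg for (sg,) in txn}`. A standalone sqlite3 illustration of the unpacking idiom, with a made-up table and values:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (event_id TEXT)")
    conn.executemany("INSERT INTO events VALUES (?)", [("$a",), ("$b",)])

    cur = conn.execute("SELECT DISTINCT event_id FROM events")
    # Each fetched row is a 1-tuple, so the trailing comma unpacks the column.
    event_ids = {event_id for (event_id,) in cur}
    assert event_ids == {"$a", "$b"}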
@@ -109,6 +109,7 @@ def _load_rules( msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events, + msc4210_enabled=experimental_config.msc4210_enabled, ) return filtered_rules diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 3bde0ae0d4..9964331510 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py
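Also from the purge hunk: `SET LOCAL statement_timeout = 0` lifts PostgreSQL's statement timeout for the current transaction only. A sketch with psycopg2, assuming a reachable database; the DSN and the DELETE are placeholders:

    import psycopg2

    conn = psycopg2.connect("dbname=synapse")  # hypothetical DSN
    with conn:  # opens a transaction, commits on success
        with conn.cursor() as cur:
            # SET LOCAL lasts until COMMIT/ROLLBACK, so only this purge is
            # exempt from the timeout; the session default is untouched.
            cur.execute("SET LOCAL statement_timeout = 0")
            cur.execute(
                "DELETE FROM events WHERE room_id = %s", ("!room:example.org",)
            )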
@@ -30,10 +30,12 @@ from typing import ( Mapping, Optional, Sequence, + Set, Tuple, cast, ) +import attr from immutabledict import immutabledict from synapse.api.constants import EduTypes @@ -43,6 +45,7 @@ from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, LoggingTransaction, + make_tuple_in_list_sql_clause, ) from synapse.storage.engines._base import IsolationLevel from synapse.storage.util.id_generators import MultiWriterIdGenerator @@ -51,10 +54,12 @@ from synapse.types import ( JsonMapping, MultiWriterStreamToken, PersistedPosition, + StrCollection, ) from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -62,6 +67,57 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +@attr.s(auto_attribs=True, slots=True, frozen=True) +class ReceiptInRoom: + receipt_type: str + user_id: str + event_id: str + thread_id: Optional[str] + data: JsonMapping + + @staticmethod + def merge_to_content(receipts: Collection["ReceiptInRoom"]) -> JsonMapping: + """Merge the given set of receipts (in a room) into the receipt + content format. + + Returns: + A mapping of the combined receipts: event ID -> receipt type -> user + ID -> receipt data. + """ + # MSC4102: always replace threaded receipts with unthreaded ones if + # there is a clash. This means we will drop some receipts, but MSC4102 + # is designed to drop semantically meaningless receipts, so this is + # okay. Previously, we would drop meaningful data! + # + # We do this by finding the unthreaded receipts, and then filtering out + # matching threaded receipts. + + # Set of (user_id, event_id) + unthreaded_receipts: Set[Tuple[str, str]] = { + (receipt.user_id, receipt.event_id) + for receipt in receipts + if receipt.thread_id is None + } + + # event_id -> receipt_type -> user_id -> receipt data + content: Dict[str, Dict[str, Dict[str, JsonMapping]]] = {} + for receipt in receipts: + data = receipt.data + if receipt.thread_id is not None: + if (receipt.user_id, receipt.event_id) in unthreaded_receipts: + # Ignore threaded receipts if we have an unthreaded one. 
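+                    # e.g. an unthreaded m.read from @alice for $ev wins over
+                    # her threaded m.read for the same $ev.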
+ continue + + data = dict(data) + data["thread_id"] = receipt.thread_id + + content.setdefault(receipt.event_id, {}).setdefault( + receipt.receipt_type, {} + )[receipt.user_id] = data + + return content + + class ReceiptsWorkerStore(SQLBaseStore): def __init__( self, @@ -398,7 +454,7 @@ class ReceiptsWorkerStore(SQLBaseStore): def f( txn: LoggingTransaction, - ) -> List[Tuple[str, str, str, str, Optional[str], str]]: + ) -> Mapping[str, Sequence[ReceiptInRoom]]: if from_key: sql = """ SELECT stream_id, instance_name, room_id, receipt_type, @@ -428,50 +484,46 @@ class ReceiptsWorkerStore(SQLBaseStore): txn.execute(sql + clause, [to_key.get_max_stream_pos()] + list(args)) - return [ - (room_id, receipt_type, user_id, event_id, thread_id, data) - for stream_id, instance_name, room_id, receipt_type, user_id, event_id, thread_id, data in txn - if MultiWriterStreamToken.is_stream_position_in_range( + results: Dict[str, List[ReceiptInRoom]] = {} + for ( + stream_id, + instance_name, + room_id, + receipt_type, + user_id, + event_id, + thread_id, + data, + ) in txn: + if not MultiWriterStreamToken.is_stream_position_in_range( from_key, to_key, instance_name, stream_id + ): + continue + + results.setdefault(room_id, []).append( + ReceiptInRoom( + receipt_type=receipt_type, + user_id=user_id, + event_id=event_id, + thread_id=thread_id, + data=db_to_json(data), + ) ) - ] + + return results txn_results = await self.db_pool.runInteraction( "_get_linearized_receipts_for_rooms", f ) - results: JsonDict = {} - for room_id, receipt_type, user_id, event_id, thread_id, data in txn_results: - # We want a single event per room, since we want to batch the - # receipts by room, event and type. - room_event = results.setdefault( - room_id, - {"type": EduTypes.RECEIPT, "room_id": room_id, "content": {}}, - ) - - # The content is of the form: - # {"$foo:bar": { "read": { "@user:host": <receipt> }, .. }, .. } - event_entry = room_event["content"].setdefault(event_id, {}) - receipt_type_dict = event_entry.setdefault(receipt_type, {}) - - # MSC4102: always replace threaded receipts with unthreaded ones if there is a clash. - # Specifically: - # - if there is no existing receipt, great, set the data. - # - if there is an existing receipt, is it threaded (thread_id present)? - # YES: replace if this receipt has no thread id. NO: do not replace. - # This means we will drop some receipts, but MSC4102 is designed to drop semantically - # meaningless receipts, so this is okay. Previously, we would drop meaningful data! - receipt_data = db_to_json(data) - if user_id in receipt_type_dict: # existing receipt - # is the existing receipt threaded and we are currently processing an unthreaded one? 
- if "thread_id" in receipt_type_dict[user_id] and not thread_id: - receipt_type_dict[user_id] = ( - receipt_data # replace with unthreaded one - ) - else: # receipt does not exist, just set it - receipt_type_dict[user_id] = receipt_data - if thread_id: - receipt_type_dict[user_id]["thread_id"] = thread_id + results: JsonDict = { + room_id: { + "room_id": room_id, + "type": EduTypes.RECEIPT, + "content": ReceiptInRoom.merge_to_content(receipts), + } + for room_id, receipts in txn_results.items() + } results = { room_id: [results[room_id]] if room_id in results else [] @@ -479,6 +531,69 @@ class ReceiptsWorkerStore(SQLBaseStore): } return results + async def get_linearized_receipts_for_events( + self, + room_and_event_ids: Collection[Tuple[str, str]], + ) -> Mapping[str, Sequence[ReceiptInRoom]]: + """Get all receipts for the given set of events. + + Arguments: + room_and_event_ids: A collection of 2-tuples of room ID and + event IDs to fetch receipts for + + Returns: + A list of receipts, one per room. + """ + if not room_and_event_ids: + return {} + + def get_linearized_receipts_for_events_txn( + txn: LoggingTransaction, + room_id_event_id_tuples: Collection[Tuple[str, str]], + ) -> List[Tuple[str, str, str, str, Optional[str], str]]: + clause, args = make_tuple_in_list_sql_clause( + self.database_engine, ("room_id", "event_id"), room_id_event_id_tuples + ) + + sql = f""" + SELECT room_id, receipt_type, user_id, event_id, thread_id, data + FROM receipts_linearized + WHERE {clause} + """ + + txn.execute(sql, args) + + return txn.fetchall() + + # room_id -> receipts + room_to_receipts: Dict[str, List[ReceiptInRoom]] = {} + for batch in batch_iter(room_and_event_ids, 1000): + batch_results = await self.db_pool.runInteraction( + "get_linearized_receipts_for_events", + get_linearized_receipts_for_events_txn, + batch, + ) + + for ( + room_id, + receipt_type, + user_id, + event_id, + thread_id, + data, + ) in batch_results: + room_to_receipts.setdefault(room_id, []).append( + ReceiptInRoom( + receipt_type=receipt_type, + user_id=user_id, + event_id=event_id, + thread_id=thread_id, + data=db_to_json(data), + ) + ) + + return room_to_receipts + @cached( num_args=2, ) @@ -550,6 +665,114 @@ class ReceiptsWorkerStore(SQLBaseStore): return results + async def get_linearized_receipts_for_user_in_rooms( + self, user_id: str, room_ids: StrCollection, to_key: MultiWriterStreamToken + ) -> Mapping[str, Sequence[ReceiptInRoom]]: + """Fetch all receipts for the user in the given room. + + Returns: + A dict from room ID to receipts in the room. + """ + + def get_linearized_receipts_for_user_in_rooms_txn( + txn: LoggingTransaction, + batch_room_ids: StrCollection, + ) -> List[Tuple[str, str, str, str, Optional[str], str]]: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch_room_ids + ) + + sql = f""" + SELECT instance_name, stream_id, room_id, receipt_type, user_id, event_id, thread_id, data + FROM receipts_linearized + WHERE {clause} AND user_id = ? AND stream_id <= ? 
+ """ + + args.append(user_id) + args.append(to_key.get_max_stream_pos()) + + txn.execute(sql, args) + + return [ + (room_id, receipt_type, user_id, event_id, thread_id, data) + for instance_name, stream_id, room_id, receipt_type, user_id, event_id, thread_id, data in txn + if MultiWriterStreamToken.is_stream_position_in_range( + low=None, + high=to_key, + instance_name=instance_name, + pos=stream_id, + ) + ] + + # room_id -> receipts + room_to_receipts: Dict[str, List[ReceiptInRoom]] = {} + for batch in batch_iter(room_ids, 1000): + batch_results = await self.db_pool.runInteraction( + "get_linearized_receipts_for_events", + get_linearized_receipts_for_user_in_rooms_txn, + batch, + ) + + for ( + room_id, + receipt_type, + user_id, + event_id, + thread_id, + data, + ) in batch_results: + room_to_receipts.setdefault(room_id, []).append( + ReceiptInRoom( + receipt_type=receipt_type, + user_id=user_id, + event_id=event_id, + thread_id=thread_id, + data=db_to_json(data), + ) + ) + + return room_to_receipts + + async def get_rooms_with_receipts_between( + self, + room_ids: StrCollection, + from_key: MultiWriterStreamToken, + to_key: MultiWriterStreamToken, + ) -> StrCollection: + """Given a set of room_ids, find out which ones (may) have receipts + between the two tokens (> `from_token` and <= `to_token`).""" + + room_ids = self._receipts_stream_cache.get_entities_changed( + room_ids, from_key.stream + ) + if not room_ids: + return [] + + def f(txn: LoggingTransaction, room_ids: StrCollection) -> StrCollection: + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", room_ids + ) + + sql = f""" + SELECT DISTINCT room_id FROM receipts_linearized + WHERE {clause} AND ? < stream_id AND stream_id <= ? + """ + args.append(from_key.stream) + args.append(to_key.get_max_stream_pos()) + + txn.execute(sql, args) + + return [room_id for (room_id,) in txn] + + results: List[str] = [] + for batch in batch_iter(room_ids, 1000): + batch_result = await self.db_pool.runInteraction( + "get_rooms_with_receipts_between", f, batch + ) + results.extend(batch_result) + + return results + async def get_users_sent_receipts_between( self, last_id: int, current_id: int ) -> List[str]: @@ -807,9 +1030,7 @@ class ReceiptsWorkerStore(SQLBaseStore): SELECT event_id WHERE room_id = ? AND stream_ordering IN ( SELECT max(stream_ordering) WHERE %s ) - """ % ( - clause, - ) + """ % (clause,) txn.execute(sql, [room_id] + list(args)) rows = txn.fetchall() @@ -954,6 +1175,12 @@ class ReceiptsBackgroundUpdateStore(SQLBaseStore): self.RECEIPTS_GRAPH_UNIQUE_INDEX_UPDATE_NAME, self._background_receipts_graph_unique_index, ) + self.db_pool.updates.register_background_index_update( + update_name="receipts_room_id_event_id_index", + index_name="receipts_linearized_event_id", + table="receipts_linearized", + columns=("room_id", "event_id"), + ) async def _populate_receipt_event_stream_ordering( self, progress: JsonDict, batch_size: int diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index df7f8a43b7..868803e169 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py
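A reduced sketch of the MSC4102 precedence rule implemented by `ReceiptInRoom.merge_to_content` above: unthreaded receipts beat threaded ones for the same (user, event) pair. The receipt tuples and values here are made up:

    from typing import Dict, List, Optional, Tuple

    # (receipt_type, user_id, event_id, thread_id, data)
    Receipt = Tuple[str, str, str, Optional[str], dict]

    def merge(receipts: List[Receipt]) -> Dict[str, Dict[str, Dict[str, dict]]]:
        # Pass 1: which (user, event) pairs have an unthreaded receipt?
        unthreaded = {(u, e) for _, u, e, t, _ in receipts if t is None}
        # event_id -> receipt_type -> user_id -> data
        content: Dict[str, Dict[str, Dict[str, dict]]] = {}
        for rtype, user, event, thread, data in receipts:
            if thread is not None:
                if (user, event) in unthreaded:
                    continue  # MSC4102: the unthreaded receipt wins
                data = {**data, "thread_id": thread}
            content.setdefault(event, {}).setdefault(rtype, {})[user] = data
        return content

    merged = merge([
        ("m.read", "@alice:hs", "$ev", "$thread", {"ts": 1}),
        ("m.read", "@alice:hs", "$ev", None, {"ts": 2}),
    ])
    assert merged == {"$ev": {"m.read": {"@alice:hs": {"ts": 2}}}}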
@@ -32,7 +32,6 @@ from synapse.api.errors import ( NotFoundError, StoreError, SynapseError, - ThreepidValidationError, ) from synapse.config.homeserver import HomeServerConfig from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -149,30 +148,6 @@ class LoginTokenLookupResult: """The session ID advertised by the SSO Identity Provider.""" -@attr.s(frozen=True, slots=True, auto_attribs=True) -class ThreepidResult: - medium: str - address: str - validated_at: int - added_at: int - - -@attr.s(frozen=True, slots=True, auto_attribs=True) -class ThreepidValidationSession: - address: str - """address of the 3pid""" - medium: str - """medium of the 3pid""" - client_secret: str - """a secret provided by the client for this validation session""" - session_id: str - """ID of the validation session""" - last_send_attempt: int - """a number serving to dedupe send attempts for this session""" - validated_at: Optional[int] - """timestamp of when this session was validated if so""" - - class RegistrationWorkerStore(CacheInvalidationWorkerStore): def __init__( self, @@ -215,12 +190,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): self._set_expiration_date_when_missing, ) - # Create a background job for culling expired 3PID validity tokens - if hs.config.worker.run_background_tasks: - self._clock.looping_call( - self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS - ) - @cached() async def get_user_by_id(self, user_id: str) -> Optional[UserInfo]: """Returns info about the user account, if it exists.""" @@ -583,7 +552,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): await self.db_pool.runInteraction("set_shadow_banned", set_shadow_banned_txn) - async def set_user_type(self, user: UserID, user_type: Optional[UserTypes]) -> None: + async def set_user_type( + self, user: UserID, user_type: Optional[Union[UserTypes, str]] + ) -> None: """Sets the user type. Args: @@ -683,7 +654,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): retcol="user_type", allow_none=True, ) - return res is None + return res is None or res not in [UserTypes.BOT, UserTypes.SUPPORT] def is_support_user_txn(self, txn: LoggingTransaction, user_id: str) -> bool: res = self.db_pool.simple_select_one_onecol_txn( @@ -759,17 +730,37 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): external_id: id on that system user_id: complete mxid that it is mapped to """ + self._invalidate_cache_and_stream( + txn, self.get_user_by_external_id, (auth_provider, external_id) + ) - self.db_pool.simple_insert_txn( + # This INSERT ... ON CONFLICT DO NOTHING statement will cause a + # 'could not serialize access due to concurrent update' + # if the row is added concurrently by another transaction. + # This is exactly what we want, as it makes the transaction get retried + # in a new snapshot where we can check for a genuine conflict. 
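+        # With empty `values`, the upsert degenerates to exactly that
+        # INSERT ... ON CONFLICT DO NOTHING, and its return value reports
+        # whether a new row was actually written.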
+ was_inserted = self.db_pool.simple_upsert_txn( txn, table="user_external_ids", - values={ - "auth_provider": auth_provider, - "external_id": external_id, - "user_id": user_id, - }, + keyvalues={"auth_provider": auth_provider, "external_id": external_id}, + values={}, + insertion_values={"user_id": user_id}, ) + if not was_inserted: + existing_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="user_external_ids", + keyvalues={"auth_provider": auth_provider, "user_id": user_id}, + retcol="external_id", + allow_none=True, + ) + + if existing_id != external_id: + raise ExternalIDReuseException( + f"{user_id!r} has external id {existing_id!r} for {auth_provider} but trying to add {external_id!r}" + ) + async def remove_user_external_id( self, auth_provider: str, external_id: str, user_id: str ) -> None: @@ -789,6 +780,9 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): }, desc="remove_user_external_id", ) + await self.invalidate_cache_and_stream( + "get_user_by_external_id", (auth_provider, external_id) + ) async def replace_user_external_id( self, @@ -809,29 +803,20 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): ExternalIDReuseException if the new external_id could not be mapped. """ - def _remove_user_external_ids_txn( + def _replace_user_external_id_txn( txn: LoggingTransaction, - user_id: str, ) -> None: - """Remove all mappings from external user ids to a mxid - If these mappings are not found, this method does nothing. - - Args: - user_id: complete mxid that it is mapped to - """ - self.db_pool.simple_delete_txn( txn, table="user_external_ids", keyvalues={"user_id": user_id}, ) - def _replace_user_external_id_txn( - txn: LoggingTransaction, - ) -> None: - _remove_user_external_ids_txn(txn, user_id) - for auth_provider, external_id in record_external_ids: + self._invalidate_cache_and_stream( + txn, self.get_user_by_external_id, (auth_provider, external_id) + ) + self._record_user_external_id_txn( txn, auth_provider, @@ -847,6 +832,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): except self.database_engine.module.IntegrityError: raise ExternalIDReuseException() + @cached() async def get_user_by_external_id( self, auth_provider: str, external_id: str ) -> Optional[str]: @@ -944,10 +930,12 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): return await self.db_pool.runInteraction("count_users", _count_users) async def count_real_users(self) -> int: - """Counts all users without a special user_type registered on the homeserver.""" + """Counts all users without the bot or support user_types registered on the homeserver.""" def _count_users(txn: LoggingTransaction) -> int: - txn.execute("SELECT COUNT(*) FROM users where user_type is null") + txn.execute( + f"SELECT COUNT(*) FROM users WHERE user_type IS NULL OR user_type NOT IN ('{UserTypes.BOT}', '{UserTypes.SUPPORT}')" + ) row = txn.fetchone() assert row is not None return row[0] @@ -965,161 +953,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): return str(next_id) - async def get_user_id_by_threepid(self, medium: str, address: str) -> Optional[str]: - """Returns user id from threepid - - Args: - medium: threepid medium e.g. email - address: threepid address e.g. me@example.com. This must already be - in canonical form. 
- - Returns: - The user ID or None if no user id/threepid mapping exists - """ - user_id = await self.db_pool.runInteraction( - "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address - ) - return user_id - - def get_user_id_by_threepid_txn( - self, txn: LoggingTransaction, medium: str, address: str - ) -> Optional[str]: - """Returns user id from threepid - - Args: - txn: - medium: threepid medium e.g. email - address: threepid address e.g. me@example.com - - Returns: - user id, or None if no user id/threepid mapping exists - """ - return self.db_pool.simple_select_one_onecol_txn( - txn, - "user_threepids", - {"medium": medium, "address": address}, - "user_id", - True, - ) - - async def user_add_threepid( - self, - user_id: str, - medium: str, - address: str, - validated_at: int, - added_at: int, - ) -> None: - await self.db_pool.simple_upsert( - "user_threepids", - {"medium": medium, "address": address}, - {"user_id": user_id, "validated_at": validated_at, "added_at": added_at}, - ) - - async def user_get_threepids(self, user_id: str) -> List[ThreepidResult]: - results = cast( - List[Tuple[str, str, int, int]], - await self.db_pool.simple_select_list( - "user_threepids", - keyvalues={"user_id": user_id}, - retcols=["medium", "address", "validated_at", "added_at"], - desc="user_get_threepids", - ), - ) - return [ - ThreepidResult( - medium=r[0], - address=r[1], - validated_at=r[2], - added_at=r[3], - ) - for r in results - ] - - async def user_delete_threepid( - self, user_id: str, medium: str, address: str - ) -> None: - await self.db_pool.simple_delete( - "user_threepids", - keyvalues={"user_id": user_id, "medium": medium, "address": address}, - desc="user_delete_threepid", - ) - - async def add_user_bound_threepid( - self, user_id: str, medium: str, address: str, id_server: str - ) -> None: - """The server proxied a bind request to the given identity server on - behalf of the given user. We need to remember this in case the user - asks us to unbind the threepid. - - Args: - user_id - medium - address - id_server - """ - # We need to use an upsert, in case they user had already bound the - # threepid - await self.db_pool.simple_upsert( - table="user_threepid_id_server", - keyvalues={ - "user_id": user_id, - "medium": medium, - "address": address, - "id_server": id_server, - }, - values={}, - insertion_values={}, - desc="add_user_bound_threepid", - ) - - async def user_get_bound_threepids(self, user_id: str) -> List[Tuple[str, str]]: - """Get the threepids that a user has bound to an identity server through the homeserver - The homeserver remembers where binds to an identity server occurred. Using this - method can retrieve those threepids. - - Args: - user_id: The ID of the user to retrieve threepids for - - Returns: - List of tuples of two strings: - medium: The medium of the threepid (e.g "email") - address: The address of the threepid (e.g "bob@example.com") - """ - return cast( - List[Tuple[str, str]], - await self.db_pool.simple_select_list( - table="user_threepid_id_server", - keyvalues={"user_id": user_id}, - retcols=["medium", "address"], - desc="user_get_bound_threepids", - ), - ) - - async def remove_user_bound_threepid( - self, user_id: str, medium: str, address: str, id_server: str - ) -> None: - """The server proxied an unbind request to the given identity server on - behalf of the given user, so we remove the mapping of threepid to - identity server. 
- - Args: - user_id - medium - address - id_server - """ - await self.db_pool.simple_delete( - table="user_threepid_id_server", - keyvalues={ - "user_id": user_id, - "medium": medium, - "address": address, - "id_server": id_server, - }, - desc="remove_user_bound_threepid", - ) - async def get_id_servers_user_bound( self, user_id: str, medium: str, address: str ) -> List[str]: @@ -1204,123 +1037,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): return bool(res) - async def get_threepid_validation_session( - self, - medium: Optional[str], - client_secret: str, - address: Optional[str] = None, - sid: Optional[str] = None, - validated: Optional[bool] = True, - ) -> Optional[ThreepidValidationSession]: - """Gets a session_id and last_send_attempt (if available) for a - combination of validation metadata - - Args: - medium: The medium of the 3PID - client_secret: A unique string provided by the client to help identify this - validation attempt - address: The address of the 3PID - sid: The ID of the validation session - validated: Whether sessions should be filtered by - whether they have been validated already or not. None to - perform no filtering - - Returns: - A ThreepidValidationSession or None if a validation session is not found - """ - if not client_secret: - raise SynapseError( - 400, "Missing parameter: client_secret", errcode=Codes.MISSING_PARAM - ) - - keyvalues = {"client_secret": client_secret} - if medium: - keyvalues["medium"] = medium - if address: - keyvalues["address"] = address - if sid: - keyvalues["session_id"] = sid - - assert address or sid - - def get_threepid_validation_session_txn( - txn: LoggingTransaction, - ) -> Optional[ThreepidValidationSession]: - sql = """ - SELECT address, session_id, medium, client_secret, - last_send_attempt, validated_at - FROM threepid_validation_session WHERE %s - """ % ( - " AND ".join("%s = ?" % k for k in keyvalues.keys()), - ) - - if validated is not None: - sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL") - - sql += " LIMIT 1" - - txn.execute(sql, list(keyvalues.values())) - row = txn.fetchone() - if not row: - return None - - return ThreepidValidationSession( - address=row[0], - session_id=row[1], - medium=row[2], - client_secret=row[3], - last_send_attempt=row[4], - validated_at=row[5], - ) - - return await self.db_pool.runInteraction( - "get_threepid_validation_session", get_threepid_validation_session_txn - ) - - async def delete_threepid_session(self, session_id: str) -> None: - """Removes a threepid validation session from the database. 
This can - be done after validation has been performed and whatever action was - waiting on it has been carried out - - Args: - session_id: The ID of the session to delete - """ - - def delete_threepid_session_txn(txn: LoggingTransaction) -> None: - self.db_pool.simple_delete_txn( - txn, - table="threepid_validation_token", - keyvalues={"session_id": session_id}, - ) - self.db_pool.simple_delete_txn( - txn, - table="threepid_validation_session", - keyvalues={"session_id": session_id}, - ) - - await self.db_pool.runInteraction( - "delete_threepid_session", delete_threepid_session_txn - ) - - @wrap_as_background_process("cull_expired_threepid_validation_tokens") - async def cull_expired_threepid_validation_tokens(self) -> None: - """Remove threepid validation tokens with expiry dates that have passed""" - - def cull_expired_threepid_validation_tokens_txn( - txn: LoggingTransaction, ts: int - ) -> None: - sql = """ - DELETE FROM threepid_validation_token WHERE - expires < ? - """ - txn.execute(sql, (ts,)) - - await self.db_pool.runInteraction( - "cull_expired_threepid_validation_tokens", - cull_expired_threepid_validation_tokens_txn, - self._clock.time_msec(), - ) - @wrap_as_background_process("account_validity_set_expiration_dates") async def _set_expiration_date_when_missing(self) -> None: """ @@ -1512,15 +1228,14 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): # Override type because the return type is only optional if # allow_none is True, and we don't want mypy throwing errors # about None not being indexable. - pending, completed = cast( - Tuple[int, int], - self.db_pool.simple_select_one_txn( - txn, - "registration_tokens", - keyvalues={"token": token}, - retcols=["pending", "completed"], - ), + row = self.db_pool.simple_select_one_txn( + txn, + "registration_tokens", + keyvalues={"token": token}, + retcols=("pending", "completed"), ) + pending = int(row[0]) + completed = int(row[1]) # Decrement pending and increment completed self.db_pool.simple_update_one_txn( @@ -2093,6 +1808,136 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore): func=is_user_approved_txn, ) + async def set_user_deactivated_status( + self, user_id: str, deactivated: bool + ) -> None: + """Set the `deactivated` property for the provided user to the provided value. + + Args: + user_id: The ID of the user to set the status for. + deactivated: The value to set for `deactivated`. + """ + + await self.db_pool.runInteraction( + "set_user_deactivated_status", + self.set_user_deactivated_status_txn, + user_id, + deactivated, + ) + + def set_user_deactivated_status_txn( + self, txn: LoggingTransaction, user_id: str, deactivated: bool + ) -> None: + self.db_pool.simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"deactivated": 1 if deactivated else 0}, + ) + self._invalidate_cache_and_stream( + txn, self.get_user_deactivated_status, (user_id,) + ) + self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) + self._invalidate_cache_and_stream(txn, self.is_guest, (user_id,)) + + async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None: + """ + Set whether the user's account is suspended in the `users` table. 
+ + Args: + user_id: The user ID of the user in question + suspended: True if the user is suspended, false if not + """ + await self.db_pool.runInteraction( + "set_user_suspended_status", + self.set_user_suspended_status_txn, + user_id, + suspended, + ) + + def set_user_suspended_status_txn( + self, txn: LoggingTransaction, user_id: str, suspended: bool + ) -> None: + self.db_pool.simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"suspended": suspended}, + ) + self._invalidate_cache_and_stream( + txn, self.get_user_suspended_status, (user_id,) + ) + self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) + + async def set_user_locked_status(self, user_id: str, locked: bool) -> None: + """Set the `locked` property for the provided user to the provided value. + + Args: + user_id: The ID of the user to set the status for. + locked: The value to set for `locked`. + """ + + await self.db_pool.runInteraction( + "set_user_locked_status", + self.set_user_locked_status_txn, + user_id, + locked, + ) + + def set_user_locked_status_txn( + self, txn: LoggingTransaction, user_id: str, locked: bool + ) -> None: + self.db_pool.simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"locked": locked}, + ) + self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,)) + self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) + + async def update_user_approval_status( + self, user_id: UserID, approved: bool + ) -> None: + """Set the user's 'approved' flag to the given value. + + The boolean will be turned into an int (in update_user_approval_status_txn) + because the column is a smallint. + + Args: + user_id: the user to update the flag for. + approved: the value to set the flag to. + """ + await self.db_pool.runInteraction( + "update_user_approval_status", + self.update_user_approval_status_txn, + user_id.to_string(), + approved, + ) + + def update_user_approval_status_txn( + self, txn: LoggingTransaction, user_id: str, approved: bool + ) -> None: + """Set the user's 'approved' flag to the given value. + + The boolean is turned into an int because the column is a smallint. + + Args: + txn: the current database transaction. + user_id: the user to update the flag for. + approved: the value to set the flag to. + """ + self.db_pool.simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"approved": approved}, + ) + + # Invalidate the caches of methods that read the value of the 'approved' flag. + self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,)) + self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,)) + class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def __init__( @@ -2205,117 +2050,6 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): return nb_processed - async def set_user_deactivated_status( - self, user_id: str, deactivated: bool - ) -> None: - """Set the `deactivated` property for the provided user to the provided value. - - Args: - user_id: The ID of the user to set the status for. - deactivated: The value to set for `deactivated`. 
-        """
-
-        await self.db_pool.runInteraction(
-            "set_user_deactivated_status",
-            self.set_user_deactivated_status_txn,
-            user_id,
-            deactivated,
-        )
-
-    def set_user_deactivated_status_txn(
-        self, txn: LoggingTransaction, user_id: str, deactivated: bool
-    ) -> None:
-        self.db_pool.simple_update_one_txn(
-            txn=txn,
-            table="users",
-            keyvalues={"name": user_id},
-            updatevalues={"deactivated": 1 if deactivated else 0},
-        )
-        self._invalidate_cache_and_stream(
-            txn, self.get_user_deactivated_status, (user_id,)
-        )
-        self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
-        txn.call_after(self.is_guest.invalidate, (user_id,))
-
-    async def set_user_suspended_status(self, user_id: str, suspended: bool) -> None:
-        """
-        Set whether the user's account is suspended in the `users` table.
-
-        Args:
-            user_id: The user ID of the user in question
-            suspended: True if the user is suspended, false if not
-        """
-        await self.db_pool.runInteraction(
-            "set_user_suspended_status",
-            self.set_user_suspended_status_txn,
-            user_id,
-            suspended,
-        )
-
-    def set_user_suspended_status_txn(
-        self, txn: LoggingTransaction, user_id: str, suspended: bool
-    ) -> None:
-        self.db_pool.simple_update_one_txn(
-            txn=txn,
-            table="users",
-            keyvalues={"name": user_id},
-            updatevalues={"suspended": suspended},
-        )
-        self._invalidate_cache_and_stream(
-            txn, self.get_user_suspended_status, (user_id,)
-        )
-        self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
-
-    async def set_user_locked_status(self, user_id: str, locked: bool) -> None:
-        """Set the `locked` property for the provided user to the provided value.
-
-        Args:
-            user_id: The ID of the user to set the status for.
-            locked: The value to set for `locked`.
-        """
-
-        await self.db_pool.runInteraction(
-            "set_user_locked_status",
-            self.set_user_locked_status_txn,
-            user_id,
-            locked,
-        )
-
-    def set_user_locked_status_txn(
-        self, txn: LoggingTransaction, user_id: str, locked: bool
-    ) -> None:
-        self.db_pool.simple_update_one_txn(
-            txn=txn,
-            table="users",
-            keyvalues={"name": user_id},
-            updatevalues={"locked": locked},
-        )
-        self._invalidate_cache_and_stream(txn, self.get_user_locked_status, (user_id,))
-        self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
-
-    def update_user_approval_status_txn(
-        self, txn: LoggingTransaction, user_id: str, approved: bool
-    ) -> None:
-        """Set the user's 'approved' flag to the given value.
-
-        The boolean is turned into an int because the column is a smallint.
-
-        Args:
-            txn: the current database transaction.
-            user_id: the user to update the flag for.
-            approved: the value to set the flag to.
-        """
-        self.db_pool.simple_update_one_txn(
-            txn=txn,
-            table="users",
-            keyvalues={"name": user_id},
-            updatevalues={"approved": approved},
-        )
-
-        # Invalidate the caches of methods that read the value of the 'approved' flag.
-        self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
-        self._invalidate_cache_and_stream(txn, self.is_user_approved, (user_id,))
-

 class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
     def __init__(
@@ -2326,9 +2060,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
     ):
         super().__init__(database, db_conn, hs)

-        self._ignore_unknown_session_error = (
-            hs.config.server.request_token_inhibit_3pid_errors
-        )
+        # Previously set from hs.config.server.request_token_inhibit_3pid_errors;
+        # the threepid-session validation path that consumed this flag has been
+        # removed, so it is now always False.
+        self._ignore_unknown_session_error = False
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id") self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id") @@ -2514,7 +2246,8 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): the user, setting their displayname to the given value admin: is an admin user? user_type: type of user. One of the values from api.constants.UserTypes, - or None for a normal user. + a custom value set in the configuration file, or None for a normal + user. shadow_banned: Whether the user is shadow-banned, i.e. they may be told their requests succeeded but we ignore them. approved: Whether to consider the user has already been approved by an @@ -2796,96 +2529,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): desc="add_user_pending_deactivation", ) - async def validate_threepid_session( - self, session_id: str, client_secret: str, token: str, current_ts: int - ) -> Optional[str]: - """Attempt to validate a threepid session using a token - - Args: - session_id: The id of a validation session - client_secret: A unique string provided by the client to help identify - this validation attempt - token: A validation token - current_ts: The current unix time in milliseconds. Used for checking - token expiry status - - Raises: - ThreepidValidationError: if a matching validation token was not found or has - expired - - Returns: - A str representing a link to redirect the user to if there is one. - """ - - # Insert everything into a transaction in order to run atomically - def validate_threepid_session_txn(txn: LoggingTransaction) -> Optional[str]: - row = self.db_pool.simple_select_one_txn( - txn, - table="threepid_validation_session", - keyvalues={"session_id": session_id}, - retcols=["client_secret", "validated_at"], - allow_none=True, - ) - - if not row: - if self._ignore_unknown_session_error: - # If we need to inhibit the error caused by an incorrect session ID, - # use None as placeholder values for the client secret and the - # validation timestamp. - # It shouldn't be an issue because they're both only checked after - # the token check, which should fail. And if it doesn't for some - # reason, the next check is on the client secret, which is NOT NULL, - # so we don't have to worry about the client secret matching by - # accident. - row = None, None - else: - raise ThreepidValidationError("Unknown session_id") - - retrieved_client_secret, validated_at = row - - row = self.db_pool.simple_select_one_txn( - txn, - table="threepid_validation_token", - keyvalues={"session_id": session_id, "token": token}, - retcols=["expires", "next_link"], - allow_none=True, - ) - - if not row: - raise ThreepidValidationError( - "Validation token not found or has expired" - ) - expires, next_link = row - - if retrieved_client_secret != client_secret: - raise ThreepidValidationError( - "This client_secret does not match the provided session_id" - ) - - # If the session is already validated, no need to revalidate - if validated_at: - return next_link - - if expires <= current_ts: - raise ThreepidValidationError( - "This token has expired. Please request a new one" - ) - - # Looks good. 
Validate the session - self.db_pool.simple_update_txn( - txn, - table="threepid_validation_session", - keyvalues={"session_id": session_id}, - updatevalues={"validated_at": self._clock.time_msec()}, - ) - - return next_link - - # Return next_link if it exists - return await self.db_pool.runInteraction( - "validate_threepid_session_txn", validate_threepid_session_txn - ) - async def start_or_continue_validation_session( self, medium: str, @@ -2944,25 +2587,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore): start_or_continue_validation_session_txn, ) - async def update_user_approval_status( - self, user_id: UserID, approved: bool - ) -> None: - """Set the user's 'approved' flag to the given value. - - The boolean will be turned into an int (in update_user_approval_status_txn) - because the column is a smallint. - - Args: - user_id: the user to update the flag for. - approved: the value to set the flag to. - """ - await self.db_pool.runInteraction( - "update_user_approval_status", - self.update_user_approval_status_txn, - user_id.to_string(), - approved, - ) - @wrap_as_background_process("delete_expired_login_tokens") async def _delete_expired_login_tokens(self) -> None: """Remove login tokens with expiry dates that have passed.""" diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 80a4bf95f2..347dbbba6b 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py
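The user_external_ids change in registration.py above replaces a plain INSERT with an insert-that-tolerates-conflicts plus a read-back, so racing writers either agree on the mapping or surface as ExternalIDReuseException. A standalone sqlite3 sketch of the same shape; the schema and error type are simplified:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE user_external_ids ("
        " auth_provider TEXT NOT NULL,"
        " external_id TEXT NOT NULL,"
        " user_id TEXT NOT NULL,"
        " UNIQUE (auth_provider, external_id))"
    )

    def record_external_id(provider: str, external: str, user: str) -> None:
        conn.execute(
            "INSERT INTO user_external_ids (auth_provider, external_id, user_id)"
            " VALUES (?, ?, ?) ON CONFLICT (auth_provider, external_id) DO NOTHING",
            (provider, external, user),
        )
        # Read back: either our row or a pre-existing one from a racing writer.
        (mapped_user,) = conn.execute(
            "SELECT user_id FROM user_external_ids"
            " WHERE auth_provider = ? AND external_id = ?",
            (provider, external),
        ).fetchone()
        if mapped_user != user:
            raise RuntimeError(f"external ID {external!r} already in use")

    record_external_id("oidc", "alice", "@alice:example.org")
    record_external_id("oidc", "alice", "@alice:example.org")  # idempotent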
@@ -51,11 +51,15 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
 from synapse.config.homeserver import HomeServerConfig
 from synapse.events import EventBase
 from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream
-from synapse.storage._base import db_to_json, make_in_list_sql_clause
+from synapse.storage._base import (
+    db_to_json,
+    make_in_list_sql_clause,
+)
 from synapse.storage.database import (
     DatabasePool,
     LoggingDatabaseConnection,
     LoggingTransaction,
+    make_tuple_in_list_sql_clause,
 )
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.types import Cursor
@@ -73,6 +77,8 @@ logger = logging.getLogger(__name__)

 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class RatelimitOverride:
+    # n.b. elsewhere in Synapse messages_per_second is represented as a float, but it is
+    # an integer in the database
     messages_per_second: int
     burst_count: int

@@ -604,6 +610,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         search_term: Optional[str],
         public_rooms: Optional[bool],
         empty_rooms: Optional[bool],
+        emma_include_tombstone: bool = False,
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Function to retrieve a paginated list of rooms as json.

@@ -623,6 +630,7 @@
                 If true, empty rooms are queried.
                 if false, empty rooms are excluded from the query. When it is
                 none (the default), both empty rooms and none-empty rooms are queried.
+            emma_include_tombstone: If true, include tombstone events in the results.
         Returns:
             A list of room dicts and an integer representing the total number of
             rooms that exist given this query
@@ -791,11 +799,43 @@
             room_count = cast(Tuple[int], txn.fetchone())
             return rooms, room_count[0]

-        return await self.db_pool.runInteraction(
+        result = await self.db_pool.runInteraction(
             "get_rooms_paginate",
             _get_rooms_paginate_txn,
         )

+        if emma_include_tombstone:
+            room_id_sql, room_id_args = make_in_list_sql_clause(
+                self.database_engine, "cse.room_id", [r["room_id"] for r in result[0]]
+            )
+
+            tombstone_sql = """
+                SELECT cse.room_id, cse.event_id, ej.json
+                FROM current_state_events cse
+                JOIN event_json ej USING (event_id)
+                WHERE cse.type = 'm.room.tombstone'
+                AND {room_id_sql}
+            """.format(
+                room_id_sql=room_id_sql
+            )
+
+            def _get_tombstones_txn(
+                txn: LoggingTransaction,
+            ) -> Tuple[List[Dict[str, Any]], int]:
+                txn.execute(tombstone_sql, room_id_args)
+                for room_id, event_id, json in txn:
+                    for result_room in result[0]:
+                        if result_room["room_id"] == room_id:
+                            result_room["gay.rory.synapse_admin_extensions.tombstone"] = db_to_json(json)
+                            break
+                return result[0], result[1]
+
+            result = await self.db_pool.runInteraction(
+                "get_rooms_tombstones", _get_tombstones_txn,
+            )
+
+        return result
+
     @cached(max_entries=10000)
     async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverride]:
         """Check if there are any overrides for ratelimiting for the given user
@@ -1127,6 +1167,109 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):

         return local_media_ids

+    def _quarantine_local_media_txn(
+        self,
+        txn: LoggingTransaction,
+        hashes: Set[str],
+        media_ids: Set[str],
+        quarantined_by: Optional[str],
+    ) -> int:
+        """Quarantine and unquarantine local media items.
+
+        Args:
+            txn (cursor)
+            hashes: A set of sha256 hashes for any media that should be quarantined
+            media_ids: A set of media IDs for any media that should be quarantined
+            quarantined_by: The ID of the user who initiated the quarantine request
+                If it is `None` media will be removed from quarantine
+        Returns:
+            The total number of media items quarantined
+        """
+        total_media_quarantined = 0
+
+        # Effectively a legacy path, update any media that was explicitly named.
+        if media_ids:
+            sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
+                txn.database_engine, "media_id", media_ids
+            )
+            sql = f"""
+                UPDATE local_media_repository
+                SET quarantined_by = ?
+                WHERE {sql_many_clause_sql}"""
+
+            if quarantined_by is not None:
+                sql += " AND safe_from_quarantine = FALSE"
+
+            txn.execute(sql, [quarantined_by] + sql_many_clause_args)
+            # Note that a rowcount of -1 can be used to indicate no rows were affected.
+            total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
+
+        # Update any media that was identified via hash.
+        if hashes:
+            sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
+                txn.database_engine, "sha256", hashes
+            )
+            sql = f"""
+                UPDATE local_media_repository
+                SET quarantined_by = ?
+                WHERE {sql_many_clause_sql}"""
+
+            if quarantined_by is not None:
+                sql += " AND safe_from_quarantine = FALSE"
+
+            txn.execute(sql, [quarantined_by] + sql_many_clause_args)
+            total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
+
+        return total_media_quarantined
+
+    def _quarantine_remote_media_txn(
+        self,
+        txn: LoggingTransaction,
+        hashes: Set[str],
+        media: Set[Tuple[str, str]],
+        quarantined_by: Optional[str],
+    ) -> int:
+        """Quarantine and unquarantine remote items
+
+        Args:
+            txn (cursor)
+            hashes: A set of sha256 hashes for any media that should be quarantined
+            media: A set of tuples (media_origin, media_id) for any media that should be quarantined
+            quarantined_by: The ID of the user who initiated the quarantine request
+                If it is `None` media will be removed from quarantine
+        Returns:
+            The total number of media items quarantined
+        """
+        total_media_quarantined = 0
+
+        if media:
+            sql_in_list_clause, sql_args = make_tuple_in_list_sql_clause(
+                txn.database_engine,
+                ("media_origin", "media_id"),
+                media,
+            )
+            sql = f"""
+                UPDATE remote_media_cache
+                SET quarantined_by = ?
+                WHERE {sql_in_list_clause}"""
+
+            txn.execute(sql, [quarantined_by] + sql_args)
+            total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
+
+        if hashes:
+            sql_many_clause_sql, sql_many_clause_args = make_in_list_sql_clause(
+                txn.database_engine, "sha256", hashes
+            )
+            sql = f"""
+                UPDATE remote_media_cache
+                SET quarantined_by = ?
+                WHERE {sql_many_clause_sql}"""
+            txn.execute(sql, [quarantined_by] + sql_many_clause_args)
+            total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
+
+        return total_media_quarantined
+
     def _quarantine_media_txn(
         self,
         txn: LoggingTransaction,
@@ -1146,40 +1289,93 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         Returns:
             The total number of media items quarantined
         """
-
-        # Update all the tables to set the quarantined_by flag
-        sql = """
-            UPDATE local_media_repository
-            SET quarantined_by = ?
-            WHERE media_id = ?
- """ - - # set quarantine - if quarantined_by is not None: - sql += "AND safe_from_quarantine = FALSE" - txn.executemany( - sql, [(quarantined_by, media_id) for media_id in local_mxcs] + hashes = set() + media_ids = set() + remote_media = set() + + # First, determine the hashes of the media we want to delete. + # We also want the media_ids for any media that lacks a hash. + if local_mxcs: + hash_sql_many_clause_sql, hash_sql_many_clause_args = ( + make_in_list_sql_clause(txn.database_engine, "media_id", local_mxcs) ) - # remove from quarantine - else: - txn.executemany( - sql, [(quarantined_by, media_id) for media_id in local_mxcs] + hash_sql = f"SELECT sha256, media_id FROM local_media_repository WHERE {hash_sql_many_clause_sql}" + if quarantined_by is not None: + hash_sql += " AND safe_from_quarantine = FALSE" + + txn.execute(hash_sql, hash_sql_many_clause_args) + for sha256, media_id in txn: + if sha256: + hashes.add(sha256) + else: + media_ids.add(media_id) + + # Do the same for remote media + if remote_mxcs: + hash_sql_in_list_clause, hash_sql_args = make_tuple_in_list_sql_clause( + txn.database_engine, + ("media_origin", "media_id"), + remote_mxcs, ) - # Note that a rowcount of -1 can be used to indicate no rows were affected. - total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0 + hash_sql = f"SELECT sha256, media_origin, media_id FROM remote_media_cache WHERE {hash_sql_in_list_clause}" + txn.execute(hash_sql, hash_sql_args) + for sha256, media_origin, media_id in txn: + if sha256: + hashes.add(sha256) + else: + remote_media.add((media_origin, media_id)) - txn.executemany( - """ - UPDATE remote_media_cache - SET quarantined_by = ? - WHERE media_origin = ? AND media_id = ? - """, - ((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs), + count = self._quarantine_local_media_txn(txn, hashes, media_ids, quarantined_by) + count += self._quarantine_remote_media_txn( + txn, hashes, remote_media, quarantined_by ) - total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0 - return total_media_quarantined + return count + + async def block_room(self, room_id: str, user_id: str) -> None: + """Marks the room as blocked. + + Can be called multiple times (though we'll only track the last user to + block this room). + + Can be called on a room unknown to this homeserver. + + Args: + room_id: Room to block + user_id: Who blocked it + """ + await self.db_pool.simple_upsert( + table="blocked_rooms", + keyvalues={"room_id": room_id}, + values={}, + insertion_values={"user_id": user_id}, + desc="block_room", + ) + await self.db_pool.runInteraction( + "block_room_invalidation", + self._invalidate_cache_and_stream, + self.is_room_blocked, + (room_id,), + ) + + async def unblock_room(self, room_id: str) -> None: + """Remove the room from blocking list. 
+
+        Args:
+            room_id: Room to unblock
+        """
+        await self.db_pool.simple_delete(
+            table="blocked_rooms",
+            keyvalues={"room_id": room_id},
+            desc="unblock_room",
+        )
+        await self.db_pool.runInteraction(
+            "block_room_invalidation",
+            self._invalidate_cache_and_stream,
+            self.is_room_blocked,
+            (room_id,),
+        )
 
     async def get_rooms_for_retention_period_in_range(
         self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
@@ -1382,6 +1578,30 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         partial_state_rooms = {row[0] for row in rows}
         return {room_id: room_id in partial_state_rooms for room_id in room_ids}
 
+    @cached(max_entries=10000, iterable=True)
+    async def get_partial_rooms(self) -> AbstractSet[str]:
+        """Get the set of "partial-state" rooms known to this server.
+
+        This is fast because the set of partially stated rooms at any point
+        across the whole server is small. It is also faster than checking
+        whether a batch of room IDs is partially stated via
+        `is_partial_state_room_batched(...)`, which spends a surprising amount
+        of CPU time looking each room up in the cache.
+        """
+
+        def _get_partial_rooms_for_user_txn(
+            txn: LoggingTransaction,
+        ) -> AbstractSet[str]:
+            sql = """
+                SELECT room_id FROM partial_state_rooms
+            """
+            txn.execute(sql)
+            return {room_id for (room_id,) in txn}
+
+        return await self.db_pool.runInteraction(
+            "get_partial_rooms_for_user", _get_partial_rooms_for_user_txn
+        )
+
     async def get_join_event_id_and_device_lists_stream_id_for_partial_state(
         self, room_id: str
     ) -> Tuple[str, int]:
@@ -1562,6 +1782,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         direction: Direction = Direction.BACKWARDS,
         user_id: Optional[str] = None,
         room_id: Optional[str] = None,
+        event_sender_user_id: Optional[str] = None,
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Retrieve a paginated list of event reports
 
@@ -1572,6 +1793,8 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             oldest first (forwards)
         user_id: search for user_id. Ignored if user_id is None
         room_id: search for room_id. Ignored if room_id is None
+        event_sender_user_id: search for the sender of the reported event.
Ignored if + event_sender_user_id is None Returns: Tuple of: json list of event reports @@ -1591,6 +1814,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): filters.append("er.room_id LIKE ?") args.extend(["%" + room_id + "%"]) + if event_sender_user_id: + filters.append("events.sender = ?") + args.extend([event_sender_user_id]) + if direction == Direction.BACKWARDS: order = "DESC" else: @@ -1606,11 +1833,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): sql = """ SELECT COUNT(*) as total_event_reports FROM event_reports AS er + LEFT JOIN events USING(event_id) JOIN room_stats_state ON room_stats_state.room_id = er.room_id {} - """.format( - where_clause - ) + """.format(where_clause) txn.execute(sql, args) count = cast(Tuple[int], txn.fetchone())[0] @@ -1626,8 +1852,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): room_stats_state.canonical_alias, room_stats_state.name FROM event_reports AS er - LEFT JOIN events - ON events.event_id = er.event_id + LEFT JOIN events USING(event_id) JOIN room_stats_state ON room_stats_state.room_id = er.room_id {where_clause} @@ -2343,6 +2568,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): self._invalidate_cache_and_stream( txn, self._get_partial_state_servers_at_join, (room_id,) ) + self._invalidate_all_cache_and_stream(txn, self.get_partial_rooms) async def write_partial_state_rooms_join_event_id( self, @@ -2470,50 +2696,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): ) return next_id - async def block_room(self, room_id: str, user_id: str) -> None: - """Marks the room as blocked. - - Can be called multiple times (though we'll only track the last user to - block this room). - - Can be called on a room unknown to this homeserver. - - Args: - room_id: Room to block - user_id: Who blocked it - """ - await self.db_pool.simple_upsert( - table="blocked_rooms", - keyvalues={"room_id": room_id}, - values={}, - insertion_values={"user_id": user_id}, - desc="block_room", - ) - await self.db_pool.runInteraction( - "block_room_invalidation", - self._invalidate_cache_and_stream, - self.is_room_blocked, - (room_id,), - ) - - async def unblock_room(self, room_id: str) -> None: - """Remove the room from blocking list. - - Args: - room_id: Room to unblock - """ - await self.db_pool.simple_delete( - table="blocked_rooms", - keyvalues={"room_id": room_id}, - desc="unblock_room", - ) - await self.db_pool.runInteraction( - "block_room_invalidation", - self._invalidate_cache_and_stream, - self.is_room_blocked, - (room_id,), - ) - async def clear_partial_state_room(self, room_id: str) -> Optional[int]: """Clears the partial state flag for a room. @@ -2527,7 +2709,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): still contains events with partial state. """ try: - async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id: + async with ( + self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id + ): await self.db_pool.runInteraction( "clear_partial_state_room", self._clear_partial_state_room_txn, @@ -2564,6 +2748,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): self._invalidate_cache_and_stream( txn, self._get_partial_state_servers_at_join, (room_id,) ) + self._invalidate_all_cache_and_stream(txn, self.get_partial_rooms) DatabasePool.simple_insert_txn( txn, diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 1d9f0f52e1..7ca73abb83 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
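
One note before the hunks: the room summary code below stops running its own GROUP BY and instead delegates to a new cached `get_member_counts` / `_get_member_counts_txn` pair. A self-contained sketch of the aggregation that pair performs over `current_state_events` (the rows and names here are illustrative only, not part of this diff):

from collections import Counter
from typing import Dict

# (type, room_id, membership) rows; rejected events carry membership=None.
rows = [
    ("m.room.member", "!r:example.com", "join"),
    ("m.room.member", "!r:example.com", "join"),
    ("m.room.member", "!r:example.com", "invite"),
    ("m.room.member", "!r:example.com", None),  # rejected: filtered out, as in the SQL
]

def member_counts(room_id: str) -> Dict[str, int]:
    return Counter(
        membership
        for (etype, rid, membership) in rows
        if etype == "m.room.member" and rid == room_id and membership is not None
    )

assert member_counts("!r:example.com") == {"join": 2, "invite": 1}
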
@@ -19,6 +19,7 @@ # # import logging +from http import HTTPStatus from typing import ( TYPE_CHECKING, AbstractSet, @@ -39,6 +40,8 @@ from typing import ( import attr from synapse.api.constants import EventTypes, Membership +from synapse.api.errors import Codes, SynapseError +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.logging.opentracing import trace from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -50,13 +53,20 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.databases.main.stream import _filter_results_by_stream from synapse.storage.engines import Sqlite3Engine -from synapse.storage.roommember import MemberSummary, ProfileInfo, RoomsForUser +from synapse.storage.roommember import ( + MemberSummary, + ProfileInfo, + RoomsForUser, + RoomsForUserSlidingSync, +) from synapse.types import ( JsonDict, PersistedEventPosition, StateMap, StrCollection, + StreamToken, get_domain_from_id, ) from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -71,6 +81,7 @@ logger = logging.getLogger(__name__) _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership" +_POPULATE_PARTICIPANT_BG_UPDATE_BATCH_SIZE = 1000 @attr.s(frozen=True, slots=True, auto_attribs=True) @@ -225,9 +236,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): AND m.room_id = c.room_id AND m.user_id = c.state_key WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? AND %s - """ % ( - clause, - ) + """ % (clause,) txn.execute(sql, (room_id, Membership.JOIN, *ids)) return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn} @@ -306,18 +315,10 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): # We do this all in one transaction to keep the cache small. # FIXME: get rid of this when we have room_stats - # Note, rejected events will have a null membership field, so - # we we manually filter them out. - sql = """ - SELECT count(*), membership FROM current_state_events - WHERE type = 'm.room.member' AND room_id = ? - AND membership IS NOT NULL - GROUP BY membership - """ + counts = self._get_member_counts_txn(txn, room_id) - txn.execute(sql, (room_id,)) res: Dict[str, MemberSummary] = {} - for count, membership in txn: + for membership, count in counts.items(): res.setdefault(membership, MemberSummary([], count)) # Order by membership (joins -> invites -> leave (former insiders) -> @@ -364,6 +365,31 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): ) @cached() + async def get_member_counts(self, room_id: str) -> Mapping[str, int]: + """Get a mapping of number of users by membership""" + + return await self.db_pool.runInteraction( + "get_member_counts", self._get_member_counts_txn, room_id + ) + + def _get_member_counts_txn( + self, txn: LoggingTransaction, room_id: str + ) -> Dict[str, int]: + """Get a mapping of number of users by membership""" + + # Note, rejected events will have a null membership field, so + # we we manually filter them out. + sql = """ + SELECT count(*), membership FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? 
+ AND membership IS NOT NULL + GROUP BY membership + """ + + txn.execute(sql, (room_id,)) + return {membership: count for count, membership in txn} + + @cached() async def get_number_joined_users_in_room(self, room_id: str) -> int: return await self.db_pool.simple_select_one_onecol( table="current_state_events", @@ -524,9 +550,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): WHERE user_id = ? AND %s - """ % ( - clause, - ) + """ % (clause,) txn.execute(sql, (user_id, *args)) results = [ @@ -631,10 +655,8 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ # Paranoia check. if not self.hs.is_mine_id(user_id): - raise Exception( - "Cannot call 'get_local_current_membership_for_user_in_room' on " - "non-local user %s" % (user_id,), - ) + message = f"Provided user_id {user_id} is a non-local user" + raise SynapseError(HTTPStatus.BAD_REQUEST, message, errcode=Codes.BAD_JSON) results = cast( Optional[Tuple[str, str]], @@ -692,6 +714,27 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return {row[0] for row in txn} + async def get_rooms_user_currently_banned_from( + self, user_id: str + ) -> FrozenSet[str]: + """Returns a set of room_ids the user is currently banned from. + + If a remote user only returns rooms this server is currently + participating in. + """ + room_ids = await self.db_pool.simple_select_onecol( + table="current_state_events", + keyvalues={ + "type": EventTypes.Member, + "membership": Membership.BAN, + "state_key": user_id, + }, + retcol="room_id", + desc="get_rooms_user_currently_banned_from", + ) + + return frozenset(room_ids) + @cached(max_entries=500000, iterable=True) async def get_rooms_for_user(self, user_id: str) -> FrozenSet[str]: """Returns a set of room_ids the user is currently joined to. @@ -808,7 +851,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ txn.execute(sql, (user_id, *args)) - return {u: True for u, in txn} + return {u: True for (u,) in txn} to_return = {} for batch_user_ids in batch_iter(other_user_ids, 1000): @@ -828,6 +871,73 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): return {u for u, share_room in user_dict.items() if share_room} + @cached(max_entries=10000) + async def does_pair_of_users_share_a_room_joined_or_invited( + self, user_id: str, other_user_id: str + ) -> bool: + raise NotImplementedError() + + @cachedList( + cached_method_name="does_pair_of_users_share_a_room_joined_or_invited", + list_name="other_user_ids", + ) + async def _do_users_share_a_room_joined_or_invited( + self, user_id: str, other_user_ids: Collection[str] + ) -> Mapping[str, Optional[bool]]: + """Return mapping from user ID to whether they share a room with the + given user via being either joined or invited. + + Note: `None` and `False` are equivalent and mean they don't share a + room. + """ + + def do_users_share_a_room_joined_or_invited_txn( + txn: LoggingTransaction, user_ids: Collection[str] + ) -> Dict[str, bool]: + clause, args = make_in_list_sql_clause( + self.database_engine, "state_key", user_ids + ) + + # This query works by fetching both the list of rooms for the target + # user and the set of other users, and then checking if there is any + # overlap. + sql = f""" + SELECT DISTINCT b.state_key + FROM ( + SELECT room_id FROM current_state_events + WHERE type = 'm.room.member' AND (membership = 'join' OR membership = 'invite') AND state_key = ? 
+ ) AS a + INNER JOIN ( + SELECT room_id, state_key FROM current_state_events + WHERE type = 'm.room.member' AND (membership = 'join' OR membership = 'invite') AND {clause} + ) AS b using (room_id) + """ + + txn.execute(sql, (user_id, *args)) + return {u: True for (u,) in txn} + + to_return = {} + for batch_user_ids in batch_iter(other_user_ids, 1000): + res = await self.db_pool.runInteraction( + "do_users_share_a_room_joined_or_invited", + do_users_share_a_room_joined_or_invited_txn, + batch_user_ids, + ) + to_return.update(res) + + return to_return + + async def do_users_share_a_room_joined_or_invited( + self, user_id: str, other_user_ids: Collection[str] + ) -> Set[str]: + """Return the set of users who share a room with the first users via being either joined or invited""" + + user_dict = await self._do_users_share_a_room_joined_or_invited( + user_id, other_user_ids + ) + + return {u for u, share_room in user_dict.items() if share_room} + async def get_users_who_share_room_with_user(self, user_id: str) -> Set[str]: """Returns the set of users who share a room with `user_id`""" room_ids = await self.get_rooms_for_user(user_id) @@ -1026,7 +1136,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): AND room_id = ? """ txn.execute(sql, (room_id,)) - return {d for d, in txn} + return {d for (d,) in txn} return await self.db_pool.runInteraction( "get_current_hosts_in_room", get_current_hosts_in_room_txn @@ -1094,7 +1204,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): """ txn.execute(sql, (room_id,)) # `server_domain` will be `NULL` for malformed MXIDs with no colons. - return tuple(d for d, in txn if d is not None) + return tuple(d for (d,) in txn if d is not None) return await self.db_pool.runInteraction( "get_current_hosts_in_room_ordered", get_current_hosts_in_room_ordered_txn @@ -1311,9 +1421,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): room_id = ? AND membership = ? AND NOT (%s) LIMIT 1 - """ % ( - clause, - ) + """ % (clause,) def _is_local_host_in_room_ignoring_users_txn( txn: LoggingTransaction, @@ -1337,11 +1445,23 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): keyvalues={"user_id": user_id, "room_id": room_id}, updatevalues={"forgotten": 1}, ) + # Handle updating the `sliding_sync_membership_snapshots` table + self.db_pool.simple_update_txn( + txn, + table="sliding_sync_membership_snapshots", + keyvalues={"user_id": user_id, "room_id": room_id}, + updatevalues={"forgotten": 1}, + ) self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) self._invalidate_cache_and_stream( txn, self.get_forgotten_rooms_for_user, (user_id,) ) + self._invalidate_cache_and_stream( + txn, + self.get_sliding_sync_rooms_for_user_from_membership_snapshots, + (user_id,), + ) await self.db_pool.runInteraction("forget_membership", f) @@ -1371,6 +1491,360 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): desc="room_forgetter_stream_pos", ) + @cached(iterable=True, max_entries=10000) + async def get_sliding_sync_rooms_for_user_from_membership_snapshots( + self, user_id: str + ) -> Mapping[str, RoomsForUserSlidingSync]: + """ + Get all the rooms for a user to handle a sliding sync request from the + `sliding_sync_membership_snapshots` table. These will be current memberships and + need to be rewound to the token range. + + Ignores forgotten rooms and rooms that the user has left themselves. 
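+        (A user has "left themselves" when their membership is 'leave' and they
+        were also the sender of that leave event, which is what the SQL below
+        filters on.)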
+ + Args: + user_id: The user ID to get the rooms for. + + Returns: + Map from room ID to membership info + """ + + def _txn( + txn: LoggingTransaction, + ) -> Dict[str, RoomsForUserSlidingSync]: + # XXX: If you use any new columns that can change (like from + # `sliding_sync_joined_rooms` or `forgotten`), make sure to bust the + # `get_sliding_sync_rooms_for_user_from_membership_snapshots` cache in the + # appropriate places (and add tests). + sql = """ + SELECT m.room_id, m.sender, m.membership, m.membership_event_id, + r.room_version, + m.event_instance_name, m.event_stream_ordering, + m.has_known_state, + COALESCE(j.room_type, m.room_type), + COALESCE(j.is_encrypted, m.is_encrypted) + FROM sliding_sync_membership_snapshots AS m + INNER JOIN rooms AS r USING (room_id) + LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join') + WHERE user_id = ? + AND m.forgotten = 0 + AND (m.membership != 'leave' OR m.user_id != m.sender) + """ + txn.execute(sql, (user_id,)) + + return { + row[0]: RoomsForUserSlidingSync( + room_id=row[0], + sender=row[1], + membership=row[2], + event_id=row[3], + room_version_id=row[4], + event_pos=PersistedEventPosition(row[5], row[6]), + has_known_state=bool(row[7]), + room_type=row[8], + is_encrypted=bool(row[9]), + ) + for row in txn + # We filter out unknown room versions proactively. They + # shouldn't go down sync and their metadata may be in a broken + # state (causing errors). + if row[4] in KNOWN_ROOM_VERSIONS + } + + return await self.db_pool.runInteraction( + "get_sliding_sync_rooms_for_user_from_membership_snapshots", + _txn, + ) + + async def get_sliding_sync_self_leave_rooms_after_to_token( + self, + user_id: str, + to_token: StreamToken, + ) -> Dict[str, RoomsForUserSlidingSync]: + """ + Get all the self-leave rooms for a user after the `to_token` (outside the token + range) that are potentially relevant[1] and needed to handle a sliding sync + request. The results are from the `sliding_sync_membership_snapshots` table and + will be current memberships and need to be rewound to the token range. + + [1] If a leave happens after the token range, we may have still been joined (or + any non-self-leave which is relevant to sync) to the room before so we need to + include it in the list of potentially relevant rooms and apply + our rewind logic (outside of this function) to see if it's actually relevant. + + This is basically a sister-function to + `get_sliding_sync_rooms_for_user_from_membership_snapshots`. We could + alternatively incorporate this logic into + `get_sliding_sync_rooms_for_user_from_membership_snapshots` but those results + are cached and the `to_token` isn't very cache friendly (people are constantly + requesting with new tokens) so we separate it out here. + + Args: + user_id: The user ID to get the rooms for. + to_token: Any self-leave memberships after this position will be returned. + + Returns: + Map from room ID to membership info + """ + # TODO: Potential to check + # `self._membership_stream_cache.has_entity_changed(...)` as an early-return + # shortcut. + + def _txn( + txn: LoggingTransaction, + ) -> Dict[str, RoomsForUserSlidingSync]: + sql = """ + SELECT m.room_id, m.sender, m.membership, m.membership_event_id, + r.room_version, + m.event_instance_name, m.event_stream_ordering, + m.has_known_state, + m.room_type, + m.is_encrypted + FROM sliding_sync_membership_snapshots AS m + INNER JOIN rooms AS r USING (room_id) + WHERE user_id = ? 
+ AND m.forgotten = 0 + AND m.membership = 'leave' + AND m.user_id = m.sender + AND (m.event_stream_ordering > ?) + """ + # If a leave happens after the token range, we may have still been joined + # (or any non-self-leave which is relevant to sync) to the room before so we + # need to include it in the list of potentially relevant rooms and apply our + # rewind logic (outside of this function). + # + # To handle tokens with a non-empty instance_map we fetch more + # results than necessary and then filter down + min_to_token_position = to_token.room_key.stream + txn.execute(sql, (user_id, min_to_token_position)) + + # Map from room_id to membership info + room_membership_for_user_map: Dict[str, RoomsForUserSlidingSync] = {} + for row in txn: + room_for_user = RoomsForUserSlidingSync( + room_id=row[0], + sender=row[1], + membership=row[2], + event_id=row[3], + room_version_id=row[4], + event_pos=PersistedEventPosition(row[5], row[6]), + has_known_state=bool(row[7]), + room_type=row[8], + is_encrypted=bool(row[9]), + ) + + # We filter out unknown room versions proactively. They shouldn't go + # down sync and their metadata may be in a broken state (causing + # errors). + if row[4] not in KNOWN_ROOM_VERSIONS: + continue + + # We only want to include the self-leave membership if it happened after + # the token range. + # + # Since the database pulls out more than necessary, we need to filter it + # down here. + if _filter_results_by_stream( + lower_token=None, + upper_token=to_token.room_key, + instance_name=room_for_user.event_pos.instance_name, + stream_ordering=room_for_user.event_pos.stream, + ): + continue + + room_membership_for_user_map[room_for_user.room_id] = room_for_user + + return room_membership_for_user_map + + return await self.db_pool.runInteraction( + "get_sliding_sync_self_leave_rooms_after_to_token", + _txn, + ) + + async def get_sliding_sync_room_for_user( + self, user_id: str, room_id: str + ) -> Optional[RoomsForUserSlidingSync]: + """Get the sliding sync room entry for the given user and room.""" + + def get_sliding_sync_room_for_user_txn( + txn: LoggingTransaction, + ) -> Optional[RoomsForUserSlidingSync]: + sql = """ + SELECT m.room_id, m.sender, m.membership, m.membership_event_id, + r.room_version, + m.event_instance_name, m.event_stream_ordering, + m.has_known_state, + COALESCE(j.room_type, m.room_type), + COALESCE(j.is_encrypted, m.is_encrypted) + FROM sliding_sync_membership_snapshots AS m + INNER JOIN rooms AS r USING (room_id) + LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join') + WHERE user_id = ? + AND m.forgotten = 0 + AND m.room_id = ? 
+ """ + txn.execute(sql, (user_id, room_id)) + row = txn.fetchone() + if not row: + return None + + return RoomsForUserSlidingSync( + room_id=row[0], + sender=row[1], + membership=row[2], + event_id=row[3], + room_version_id=row[4], + event_pos=PersistedEventPosition(row[5], row[6]), + has_known_state=bool(row[7]), + room_type=row[8], + is_encrypted=row[9], + ) + + return await self.db_pool.runInteraction( + "get_sliding_sync_room_for_user", get_sliding_sync_room_for_user_txn + ) + + async def get_sliding_sync_room_for_user_batch( + self, user_id: str, room_ids: StrCollection + ) -> Dict[str, RoomsForUserSlidingSync]: + """Get the sliding sync room entry for the given user and rooms.""" + + if not room_ids: + return {} + + def get_sliding_sync_room_for_user_batch_txn( + txn: LoggingTransaction, + ) -> Dict[str, RoomsForUserSlidingSync]: + clause, args = make_in_list_sql_clause( + self.database_engine, "m.room_id", room_ids + ) + sql = f""" + SELECT m.room_id, m.sender, m.membership, m.membership_event_id, + r.room_version, + m.event_instance_name, m.event_stream_ordering, + m.has_known_state, + COALESCE(j.room_type, m.room_type), + COALESCE(j.is_encrypted, m.is_encrypted) + FROM sliding_sync_membership_snapshots AS m + INNER JOIN rooms AS r USING (room_id) + LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join') + WHERE m.forgotten = 0 + AND {clause} + AND user_id = ? + """ + args.append(user_id) + txn.execute(sql, args) + + return { + row[0]: RoomsForUserSlidingSync( + room_id=row[0], + sender=row[1], + membership=row[2], + event_id=row[3], + room_version_id=row[4], + event_pos=PersistedEventPosition(row[5], row[6]), + has_known_state=bool(row[7]), + room_type=row[8], + is_encrypted=row[9], + ) + for row in txn + } + + return await self.db_pool.runInteraction( + "get_sliding_sync_room_for_user_batch", + get_sliding_sync_room_for_user_batch_txn, + ) + + async def get_rooms_for_user_by_date( + self, user_id: str, from_ts: int + ) -> FrozenSet[str]: + """ + Fetch a list of rooms that the user has joined at or after the given timestamp, including + those they subsequently have left/been banned from. + + Args: + user_id: user ID of the user to search for + from_ts: a timestamp in ms from the unix epoch at which to begin the search at + """ + + def _get_rooms_for_user_by_join_date_txn( + txn: LoggingTransaction, user_id: str, timestamp: int + ) -> frozenset: + sql = """ + SELECT rm.room_id + FROM room_memberships AS rm + INNER JOIN events AS e USING (event_id) + WHERE rm.user_id = ? + AND rm.membership = 'join' + AND e.type = 'm.room.member' + AND e.received_ts >= ? + """ + txn.execute(sql, (user_id, timestamp)) + return frozenset([r[0] for r in txn]) + + return await self.db_pool.runInteraction( + "_get_rooms_for_user_by_join_date_txn", + _get_rooms_for_user_by_join_date_txn, + user_id, + from_ts, + ) + + async def set_room_participation(self, user_id: str, room_id: str) -> None: + """ + Record the provided user as participating in the given room + + Args: + user_id: the user ID of the user + room_id: ID of the room to set the participant in + """ + + def _set_room_participation_txn( + txn: LoggingTransaction, user_id: str, room_id: str + ) -> None: + sql = """ + UPDATE room_memberships + SET participant = true + WHERE event_id IN ( + SELECT event_id FROM local_current_membership + WHERE user_id = ? AND room_id = ? 
+ ) + AND NOT participant + """ + txn.execute(sql, (user_id, room_id)) + + await self.db_pool.runInteraction( + "_set_room_participation_txn", _set_room_participation_txn, user_id, room_id + ) + + async def get_room_participation(self, user_id: str, room_id: str) -> bool: + """ + Check whether a user is listed as a participant in a room + + Args: + user_id: user ID of the user + room_id: ID of the room to check in + """ + + def _get_room_participation_txn( + txn: LoggingTransaction, user_id: str, room_id: str + ) -> bool: + sql = """ + SELECT participant + FROM local_current_membership AS l + INNER JOIN room_memberships AS r USING (event_id) + WHERE l.user_id = ? + AND l.room_id = ? + """ + txn.execute(sql, (user_id, room_id)) + res = txn.fetchone() + if res: + return res[0] + return False + + return await self.db_pool.runInteraction( + "_get_room_participation_txn", _get_room_participation_txn, user_id, room_id + ) + class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( @@ -1405,10 +1879,12 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): self, progress: JsonDict, batch_size: int ) -> int: target_min_stream_id = progress.get( - "target_min_stream_id_inclusive", self._min_stream_order_on_start # type: ignore[attr-defined] + "target_min_stream_id_inclusive", + self._min_stream_order_on_start, # type: ignore[attr-defined] ) max_stream_id = progress.get( - "max_stream_id_exclusive", self._stream_order_on_start + 1 # type: ignore[attr-defined] + "max_stream_id_exclusive", + self._stream_order_on_start + 1, # type: ignore[attr-defined] ) def add_membership_profile_txn(txn: LoggingTransaction) -> int: diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py
index 20fcfd3122..1d5c5e72ff 100644
--- a/synapse/storage/databases/main/search.py
+++ b/synapse/storage/databases/main/search.py
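
The first hunk below turns a generator expression into a list before passing it to `txn.execute_batch`. One plausible motivation (our assumption, shown with a toy): batch-execution helpers may traverse their parameter sets more than once, and a generator silently yields nothing on the second pass.

def consume_twice(param_sets):
    # Simulates a helper that walks its parameters twice (e.g. once to size
    # batches, once to execute them).
    first = sum(1 for _ in param_sets)
    second = sum(1 for _ in param_sets)
    return first, second

gen = ((i, i * i) for i in range(3))
assert consume_twice(gen) == (3, 0)  # generator: second pass sees nothing
assert consume_twice([(i, i * i) for i in range(3)]) == (3, 3)  # list: safe
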
@@ -94,7 +94,7 @@ class SearchWorkerStore(SQLBaseStore): VALUES (?,?,?,to_tsvector('english', ?),?,?) """ - args1 = ( + args1 = [ ( entry.event_id, entry.room_id, @@ -104,7 +104,7 @@ class SearchWorkerStore(SQLBaseStore): entry.origin_server_ts, ) for entry in entries - ) + ] txn.execute_batch(sql, args1) @@ -177,9 +177,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): AND (%s) ORDER BY stream_ordering DESC LIMIT ? - """ % ( - " OR ".join("type = '%s'" % (t,) for t in TYPES), - ) + """ % (" OR ".join("type = '%s'" % (t,) for t in TYPES),) txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py new file mode 100644
index 0000000000..6a62b11d1e
--- /dev/null
+++ b/synapse/storage/databases/main/sliding_sync.py
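
This new module tracks per-connection sliding sync state keyed by "connection positions". A toy model (ours, not Synapse's real store) of the lifecycle the code below implements: persisting returns a fresh position, a client-supplied previous position is validated before use, and once a position is acknowledged every other position for the connection is dropped.

from typing import Dict, Optional

class ToyConnectionStore:
    def __init__(self) -> None:
        self._positions: Dict[int, dict] = {}
        self._next_position = 0

    def persist(self, state: dict, previous: Optional[int]) -> int:
        # A client-supplied position must exist (and, in the real store, belong
        # to the same user/device/connection); otherwise reject it.
        if previous is not None and previous not in self._positions:
            raise KeyError("unknown position")  # cf. SlidingSyncUnknownPosition
        self._next_position += 1
        self._positions[self._next_position] = state
        return self._next_position

    def get_and_clear(self, position: int) -> dict:
        # Once the client has used a position, all other positions for the
        # connection can be deleted.
        state = self._positions[position]
        self._positions = {position: state}
        return state

store = ToyConnectionStore()
p1 = store.persist({"rooms": {}}, previous=None)
p2 = store.persist({"rooms": {"!r": "live"}}, previous=p1)
assert store.get_and_clear(p2) == {"rooms": {"!r": "live"}}
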
@@ -0,0 +1,603 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2023, 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + + +import logging +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, cast + +import attr + +from synapse.api.errors import SlidingSyncUnknownPosition +from synapse.logging.opentracing import log_kv +from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) +from synapse.types import MultiWriterStreamToken, RoomStreamToken +from synapse.types.handlers.sliding_sync import ( + HaveSentRoom, + HaveSentRoomFlag, + MutablePerConnectionState, + PerConnectionState, + RoomStatusMap, + RoomSyncConfig, +) +from synapse.util import json_encoder +from synapse.util.caches.descriptors import cached + +if TYPE_CHECKING: + from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore + +logger = logging.getLogger(__name__) + + +class SlidingSyncStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_index_update( + update_name="sliding_sync_connection_room_configs_required_state_id_idx", + index_name="sliding_sync_connection_room_configs_required_state_id_idx", + table="sliding_sync_connection_room_configs", + columns=("required_state_id",), + ) + + self.db_pool.updates.register_background_index_update( + update_name="sliding_sync_membership_snapshots_membership_event_id_idx", + index_name="sliding_sync_membership_snapshots_membership_event_id_idx", + table="sliding_sync_membership_snapshots", + columns=("membership_event_id",), + ) + + self.db_pool.updates.register_background_index_update( + update_name="sliding_sync_membership_snapshots_user_id_stream_ordering", + index_name="sliding_sync_membership_snapshots_user_id_stream_ordering", + table="sliding_sync_membership_snapshots", + columns=("user_id", "event_stream_ordering"), + replaces_index="sliding_sync_membership_snapshots_user_id", + ) + + async def get_latest_bump_stamp_for_room( + self, + room_id: str, + ) -> Optional[int]: + """ + Get the `bump_stamp` for the room. + + The `bump_stamp` is the `stream_ordering` of the last event according to the + `bump_event_types`. This helps clients sort more readily without them needing to + pull in a bunch of the timeline to determine the last activity. + `bump_event_types` is a thing because for example, we don't want display name + changes to mark the room as unread and bump it to the top. For encrypted rooms, + we just have to consider any activity as a bump because we can't see the content + and the client has to figure it out for themselves. + + This should only be called where the server is participating + in the room (someone local is joined). + + Returns: + The `bump_stamp` for the room (which can be `None`). 
+ """ + + return cast( + Optional[int], + await self.db_pool.simple_select_one_onecol( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + retcol="bump_stamp", + # FIXME: This should be `False` once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked + # by https://github.com/element-hq/synapse/issues/17623) + # + # The should be `allow_none=False` in the future because event though + # `bump_stamp` itself can be `None`, we should have a row in the + # `sliding_sync_joined_rooms` table for any joined room. + allow_none=True, + ), + ) + + async def persist_per_connection_state( + self, + user_id: str, + device_id: str, + conn_id: str, + previous_connection_position: Optional[int], + per_connection_state: "MutablePerConnectionState", + ) -> int: + """Persist updates to the per-connection state for a sliding sync + connection. + + Returns: + The connection position of the newly persisted state. + """ + + # This cast is safe because the downstream code only cares about + # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed + # alongside `SlidingSyncStore` wherever we create a store. + store = cast("DataStore", self) + + return await self.db_pool.runInteraction( + "persist_per_connection_state", + self.persist_per_connection_state_txn, + user_id=user_id, + device_id=device_id, + conn_id=conn_id, + previous_connection_position=previous_connection_position, + per_connection_state=await PerConnectionStateDB.from_state( + per_connection_state, store + ), + ) + + def persist_per_connection_state_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + conn_id: str, + previous_connection_position: Optional[int], + per_connection_state: "PerConnectionStateDB", + ) -> int: + # First we fetch (or create) the connection key associated with the + # previous connection position. + if previous_connection_position is not None: + # The `previous_connection_position` is a user-supplied value, so we + # need to make sure that the one they supplied is actually theirs. + sql = """ + SELECT connection_key + FROM sliding_sync_connection_positions + INNER JOIN sliding_sync_connections USING (connection_key) + WHERE + connection_position = ? + AND user_id = ? AND effective_device_id = ? AND conn_id = ? + """ + txn.execute( + sql, (previous_connection_position, user_id, device_id, conn_id) + ) + row = txn.fetchone() + if row is None: + raise SlidingSyncUnknownPosition() + + (connection_key,) = row + else: + # We're restarting the connection, so we clear the previous existing data we + # used to track it. We do this here to ensure that if we get lots of + # one-shot requests we don't stack up lots of entries. We have `ON DELETE + # CASCADE` setup on the dependent tables so this will clear out all the + # associated data. 
+ self.db_pool.simple_delete_txn( + txn, + table="sliding_sync_connections", + keyvalues={ + "user_id": user_id, + "effective_device_id": device_id, + "conn_id": conn_id, + }, + ) + + (connection_key,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connections", + values={ + "user_id": user_id, + "effective_device_id": device_id, + "conn_id": conn_id, + "created_ts": self._clock.time_msec(), + }, + returning=("connection_key",), + ) + + # Define a new connection position for the updates + (connection_position,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connection_positions", + values={ + "connection_key": connection_key, + "created_ts": self._clock.time_msec(), + }, + returning=("connection_position",), + ) + + # We need to deduplicate the `required_state` JSON. We do this by + # fetching all JSON associated with the connection and comparing that + # with the updates to `required_state` + + # Dict from required state json -> required state ID + required_state_to_id: Dict[str, int] = {} + if previous_connection_position is not None: + rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_required_state", + keyvalues={"connection_key": connection_key}, + retcols=("required_state_id", "required_state"), + ) + for required_state_id, required_state in rows: + required_state_to_id[required_state] = required_state_id + + room_to_state_ids: Dict[str, int] = {} + unique_required_state: Dict[str, List[str]] = {} + for room_id, room_state in per_connection_state.room_configs.items(): + serialized_state = json_encoder.encode( + # We store the required state as a sorted list of event type / + # state key tuples. + sorted( + (event_type, state_key) + for event_type, state_keys in room_state.required_state_map.items() + for state_key in state_keys + ) + ) + + existing_state_id = required_state_to_id.get(serialized_state) + if existing_state_id is not None: + room_to_state_ids[room_id] = existing_state_id + else: + unique_required_state.setdefault(serialized_state, []).append(room_id) + + # Insert any new `required_state` json we haven't previously seen. + for serialized_required_state, room_ids in unique_required_state.items(): + (required_state_id,) = self.db_pool.simple_insert_returning_txn( + txn, + table="sliding_sync_connection_required_state", + values={ + "connection_key": connection_key, + "required_state": serialized_required_state, + }, + returning=("required_state_id",), + ) + for room_id in room_ids: + room_to_state_ids[room_id] = required_state_id + + # Copy over state from the previous connection position (we'll overwrite + # these rows with any changes). + if previous_connection_position is not None: + sql = """ + INSERT INTO sliding_sync_connection_streams + (connection_position, stream, room_id, room_status, last_token) + SELECT ?, stream, room_id, room_status, last_token + FROM sliding_sync_connection_streams + WHERE connection_position = ? + """ + txn.execute(sql, (connection_position, previous_connection_position)) + + sql = """ + INSERT INTO sliding_sync_connection_room_configs + (connection_position, room_id, timeline_limit, required_state_id) + SELECT ?, room_id, timeline_limit, required_state_id + FROM sliding_sync_connection_room_configs + WHERE connection_position = ? + """ + txn.execute(sql, (connection_position, previous_connection_position)) + + # We now upsert the changes to the various streams. 
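+        # Keys are (connection_position, stream, room_id); values are the
+        # (room_status, last_token) pair recorded for that stream in the room.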
+ key_values = [] + value_values = [] + for room_id, have_sent_room in per_connection_state.rooms._statuses.items(): + key_values.append((connection_position, "rooms", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + for room_id, have_sent_room in per_connection_state.receipts._statuses.items(): + key_values.append((connection_position, "receipts", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + for ( + room_id, + have_sent_room, + ) in per_connection_state.account_data._statuses.items(): + key_values.append((connection_position, "account_data", room_id)) + value_values.append( + (have_sent_room.status.value, have_sent_room.last_token) + ) + + self.db_pool.simple_upsert_many_txn( + txn, + table="sliding_sync_connection_streams", + key_names=( + "connection_position", + "stream", + "room_id", + ), + key_values=key_values, + value_names=( + "room_status", + "last_token", + ), + value_values=value_values, + ) + + # ... and upsert changes to the room configs. + keys = [] + values = [] + for room_id, room_config in per_connection_state.room_configs.items(): + keys.append((connection_position, room_id)) + values.append((room_config.timeline_limit, room_to_state_ids[room_id])) + + self.db_pool.simple_upsert_many_txn( + txn, + table="sliding_sync_connection_room_configs", + key_names=( + "connection_position", + "room_id", + ), + key_values=keys, + value_names=( + "timeline_limit", + "required_state_id", + ), + value_values=values, + ) + + return connection_position + + @cached(iterable=True, max_entries=100000) + async def get_and_clear_connection_positions( + self, user_id: str, device_id: str, conn_id: str, connection_position: int + ) -> "PerConnectionState": + """Get the per-connection state for the given connection position.""" + + per_connection_state_db = await self.db_pool.runInteraction( + "get_and_clear_connection_positions", + self._get_and_clear_connection_positions_txn, + user_id=user_id, + device_id=device_id, + conn_id=conn_id, + connection_position=connection_position, + ) + + # This cast is safe because the downstream code only cares about + # `store.get_id_for_instance(...)` and `StreamWorkerStore` is mixed + # alongside `SlidingSyncStore` wherever we create a store. + store = cast("DataStore", self) + + return await per_connection_state_db.to_state(store) + + def _get_and_clear_connection_positions_txn( + self, + txn: LoggingTransaction, + user_id: str, + device_id: str, + conn_id: str, + connection_position: int, + ) -> "PerConnectionStateDB": + # The `previous_connection_position` is a user-supplied value, so we + # need to make sure that the one they supplied is actually theirs. + sql = """ + SELECT connection_key + FROM sliding_sync_connection_positions + INNER JOIN sliding_sync_connections USING (connection_key) + WHERE + connection_position = ? + AND user_id = ? AND effective_device_id = ? AND conn_id = ? + """ + txn.execute(sql, (connection_position, user_id, device_id, conn_id)) + row = txn.fetchone() + if row is None: + raise SlidingSyncUnknownPosition() + + (connection_key,) = row + + # Now that we have seen the client has received and used the connection + # position, we can delete all the other connection positions. + sql = """ + DELETE FROM sliding_sync_connection_positions + WHERE connection_key = ? AND connection_position != ? 
+ """ + txn.execute(sql, (connection_key, connection_position)) + + # Fetch and create a mapping from required state ID to the actual + # required state for the connection. + rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_required_state", + keyvalues={"connection_key": connection_key}, + retcols=( + "required_state_id", + "required_state", + ), + ) + + required_state_map: Dict[int, Dict[str, Set[str]]] = {} + for row in rows: + state = required_state_map[row[0]] = {} + for event_type, state_key in db_to_json(row[1]): + state.setdefault(event_type, set()).add(state_key) + + # Get all the room configs, looking up the required state from the map + # above. + room_config_rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_room_configs", + keyvalues={"connection_position": connection_position}, + retcols=( + "room_id", + "timeline_limit", + "required_state_id", + ), + ) + + room_configs: Dict[str, RoomSyncConfig] = {} + for ( + room_id, + timeline_limit, + required_state_id, + ) in room_config_rows: + room_configs[room_id] = RoomSyncConfig( + timeline_limit=timeline_limit, + required_state_map=required_state_map[required_state_id], + ) + + # Now look up the per-room stream data. + rooms: Dict[str, HaveSentRoom[str]] = {} + receipts: Dict[str, HaveSentRoom[str]] = {} + account_data: Dict[str, HaveSentRoom[str]] = {} + + receipt_rows = self.db_pool.simple_select_list_txn( + txn, + table="sliding_sync_connection_streams", + keyvalues={"connection_position": connection_position}, + retcols=( + "stream", + "room_id", + "room_status", + "last_token", + ), + ) + for stream, room_id, room_status, last_token in receipt_rows: + have_sent_room: HaveSentRoom[str] = HaveSentRoom( + status=HaveSentRoomFlag(room_status), last_token=last_token + ) + if stream == "rooms": + rooms[room_id] = have_sent_room + elif stream == "receipts": + receipts[room_id] = have_sent_room + elif stream == "account_data": + account_data[room_id] = have_sent_room + else: + # For forwards compatibility we ignore unknown streams, as in + # future we want to be able to easily add more stream types. + logger.warning("Unrecognized sliding sync stream in DB %r", stream) + + return PerConnectionStateDB( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + account_data=RoomStatusMap(account_data), + room_configs=room_configs, + ) + + +@attr.s(auto_attribs=True, frozen=True) +class PerConnectionStateDB: + """An equivalent to `PerConnectionState` that holds data in a format stored + in the DB. + + The principle difference is that the tokens for the different streams are + serialized to strings. + + When persisting this *only* contains updates to the state. 
+ """ + + rooms: "RoomStatusMap[str]" + receipts: "RoomStatusMap[str]" + account_data: "RoomStatusMap[str]" + + room_configs: Mapping[str, "RoomSyncConfig"] + + @staticmethod + async def from_state( + per_connection_state: "MutablePerConnectionState", store: "DataStore" + ) -> "PerConnectionStateDB": + """Convert from a standard `PerConnectionState`""" + rooms = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await status.last_token.to_string(store) + if status.last_token is not None + else None + ), + ) + for room_id, status in per_connection_state.rooms.get_updates().items() + } + + receipts = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await status.last_token.to_string(store) + if status.last_token is not None + else None + ), + ) + for room_id, status in per_connection_state.receipts.get_updates().items() + } + + account_data = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + str(status.last_token) if status.last_token is not None else None + ), + ) + for room_id, status in per_connection_state.account_data.get_updates().items() + } + + log_kv( + { + "rooms": rooms, + "receipts": receipts, + "account_data": account_data, + "room_configs": per_connection_state.room_configs.maps[0], + } + ) + + return PerConnectionStateDB( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + account_data=RoomStatusMap(account_data), + room_configs=per_connection_state.room_configs.maps[0], + ) + + async def to_state(self, store: "DataStore") -> "PerConnectionState": + """Convert into a standard `PerConnectionState`""" + rooms = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await RoomStreamToken.parse(store, status.last_token) + if status.last_token is not None + else None + ), + ) + for room_id, status in self.rooms._statuses.items() + } + + receipts = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + await MultiWriterStreamToken.parse(store, status.last_token) + if status.last_token is not None + else None + ), + ) + for room_id, status in self.receipts._statuses.items() + } + + account_data = { + room_id: HaveSentRoom( + status=status.status, + last_token=( + int(status.last_token) if status.last_token is not None else None + ), + ) + for room_id, status in self.account_data._statuses.items() + } + + return PerConnectionState( + rooms=RoomStatusMap(rooms), + receipts=RoomStatusMap(receipts), + account_data=RoomStatusMap(account_data), + room_configs=self.room_configs, + ) diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index 62bc4600fb..788f7d1e32 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
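
The `get_room_type` change below works around a caching ambiguity: under `@cached`, `None` would mean both "room has no type" and "room unknown to this server", so unknown rooms return a distinct sentinel instead. A self-contained toy of the same idea (our own simplification, mirroring the diff's `ROOM_UNKNOWN_SENTINEL`):

from enum import Enum

class Sentinel(Enum):
    ROOM_UNKNOWN = "unknown"

def room_type(room_id, create_event_contents):
    if room_id not in create_event_contents:
        return Sentinel.ROOM_UNKNOWN  # room not known to the server at all
    # A known room may legitimately have no type, i.e. None.
    return create_event_contents[room_id].get("type")

assert room_type("!known", {"!known": {}}) is None
assert room_type("!unknown", {}) is Sentinel.ROOM_UNKNOWN
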
@@ -308,8 +308,24 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return create_event @cached(max_entries=10000) - async def get_room_type(self, room_id: str) -> Optional[str]: - raise NotImplementedError() + async def get_room_type(self, room_id: str) -> Union[Optional[str], Sentinel]: + """Fetch room type for given room. + + Since this function is cached, any missing values would be cached as + `None`. In order to distinguish between an unencrypted room that has + `None` encryption and a room that is unknown to the server where we + might want to omit the value (which would make it cached as `None`), + instead we use the sentinel value `ROOM_UNKNOWN_SENTINEL`. + """ + + try: + create_event = await self.get_create_event_for_room(room_id) + return create_event.content.get(EventContentFields.ROOM_TYPE) + except NotFoundError: + # We use the sentinel value to distinguish between `None` which is a + # valid room type and a room that is unknown to the server so the value + # is just unset. + return ROOM_UNKNOWN_SENTINEL @cachedList(cached_method_name="get_room_type", list_name="room_ids") async def bulk_get_room_type( @@ -535,7 +551,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): desc="check_if_events_in_current_state", ) - return frozenset(event_id for event_id, in rows) + return frozenset(event_id for (event_id,) in rows) # FIXME: how should this be cached? @cancellable @@ -556,10 +572,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): Returns: Map from type/state_key to event ID. """ + if state_filter is None: + state_filter = StateFilter.all() - where_clause, where_args = ( - state_filter or StateFilter.all() - ).make_sql_filter_clause() + where_clause, where_args = (state_filter).make_sql_filter_clause() if not where_clause: # We delegate to the cached version @@ -568,7 +584,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def _get_filtered_current_state_ids_txn( txn: LoggingTransaction, ) -> StateMap[str]: - results = StateMapWrapper(state_filter=state_filter or StateFilter.all()) + results = StateMapWrapper(state_filter=state_filter) sql = """ SELECT type, state_key, event_id FROM current_state_events @@ -665,7 +681,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): context: EventContext, ) -> None: """Update the state group for a partial state event""" - async with self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id: + async with ( + self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id + ): await self.db_pool.runInteraction( "update_state_for_partial_state_event", self._update_state_for_partial_state_event_txn, @@ -736,6 +754,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events" + MEMBERS_CURRENT_STATE_UPDATE_NAME = "current_state_events_members_room_index" def __init__( self, @@ -764,6 +783,13 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): self.DELETE_CURRENT_STATE_UPDATE_NAME, self._background_remove_left_rooms, ) + self.db_pool.updates.register_background_index_update( + self.MEMBERS_CURRENT_STATE_UPDATE_NAME, + index_name="current_state_events_members_room_index", + table="current_state_events", + columns=["room_id", "membership"], + where_clause="type='m.room.member'", + ) async def 
_background_remove_left_rooms(
         self, progress: JsonDict, batch_size: int
diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
index 9ed39e688a..00f87cc3a1 100644
--- a/synapse/storage/databases/main/state_deltas.py
+++ b/synapse/storage/databases/main/state_deltas.py
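
The delta getters below document their token range as `> from_token and <= to_token`. A toy check of that half-open convention (plain ints stand in for `RoomStreamToken`s; the multi-writer instance-map subtleties that `_filter_results_by_stream` handles are ignored here):

from typing import Optional

def in_range(stream_id: int, from_id: Optional[int], to_id: Optional[int]) -> bool:
    return (from_id is None or stream_id > from_id) and (
        to_id is None or stream_id <= to_id
    )

assert not in_range(5, from_id=5, to_id=10)   # lower bound is exclusive
assert in_range(10, from_id=5, to_id=10)      # upper bound is inclusive
assert in_range(7, from_id=None, to_id=None)  # missing tokens mean unbounded
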
@@ -20,16 +20,25 @@ # import logging -from typing import List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple import attr from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore -from synapse.storage.database import LoggingTransaction +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, + make_in_list_sql_clause, +) from synapse.storage.databases.main.stream import _filter_results_by_stream -from synapse.types import RoomStreamToken +from synapse.types import RoomStreamToken, StrCollection from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.iterutils import batch_iter + +if TYPE_CHECKING: + from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -53,6 +62,21 @@ class StateDeltasStore(SQLBaseStore): # attribute. TODO: can we get static analysis to enforce this? _curr_state_delta_stream_cache: StreamChangeCache + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_index_update( + update_name="current_state_delta_stream_room_index", + index_name="current_state_delta_stream_room_idx", + table="current_state_delta_stream", + columns=("room_id", "stream_id"), + ) + async def get_partial_current_state_deltas( self, prev_stream_id: int, max_stream_id: int ) -> Tuple[int, List[StateDelta]]: @@ -74,9 +98,9 @@ class StateDeltasStore(SQLBaseStore): prev_stream_id = int(prev_stream_id) # check we're not going backwards - assert ( - prev_stream_id <= max_stream_id - ), f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}" + assert prev_stream_id <= max_stream_id, ( + f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}" + ) if not self._curr_state_delta_stream_cache.has_any_entity_changed( prev_stream_id @@ -160,38 +184,144 @@ class StateDeltasStore(SQLBaseStore): self._get_max_stream_id_in_current_state_deltas_txn, ) + def get_current_state_deltas_for_room_txn( + self, + txn: LoggingTransaction, + room_id: str, + *, + from_token: Optional[RoomStreamToken], + to_token: Optional[RoomStreamToken], + ) -> List[StateDelta]: + """ + Get the state deltas between two tokens. + + (> `from_token` and <= `to_token`) + """ + from_clause = "" + from_args = [] + if from_token is not None: + from_clause = "AND ? < stream_id" + from_args = [from_token.stream] + + to_clause = "" + to_args = [] + if to_token is not None: + to_clause = "AND stream_id <= ?" + to_args = [to_token.get_max_stream_pos()] + + sql = f""" + SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id + FROM current_state_delta_stream + WHERE room_id = ? {from_clause} {to_clause} + ORDER BY stream_id ASC + """ + txn.execute(sql, [room_id] + from_args + to_args) + + return [ + StateDelta( + stream_id=row[1], + room_id=room_id, + event_type=row[2], + state_key=row[3], + event_id=row[4], + prev_event_id=row[5], + ) + for row in txn + if _filter_results_by_stream(from_token, to_token, row[0], row[1]) + ] + @trace async def get_current_state_deltas_for_room( - self, room_id: str, from_token: RoomStreamToken, to_token: RoomStreamToken + self, + room_id: str, + *, + from_token: Optional[RoomStreamToken], + to_token: Optional[RoomStreamToken], ) -> List[StateDelta]: - """Get the state deltas between two tokens.""" + """ + Get the state deltas between two tokens. 
+ + (> `from_token` and <= `to_token`) + """ + # We can bail early if the `from_token` is after the `to_token` + if ( + to_token is not None + and from_token is not None + and to_token.is_before_or_eq(from_token) + ): + return [] - def get_current_state_deltas_for_room_txn( + if ( + from_token is not None + and not self._curr_state_delta_stream_cache.has_entity_changed( + room_id, from_token.stream + ) + ): + return [] + + return await self.db_pool.runInteraction( + "get_current_state_deltas_for_room", + self.get_current_state_deltas_for_room_txn, + room_id, + from_token=from_token, + to_token=to_token, + ) + + @trace + async def get_current_state_deltas_for_rooms( + self, + room_ids: StrCollection, + from_token: RoomStreamToken, + to_token: RoomStreamToken, + ) -> List[StateDelta]: + """Get the state deltas between two tokens for the set of rooms.""" + + room_ids = self._curr_state_delta_stream_cache.get_entities_changed( + room_ids, from_token.stream + ) + if not room_ids: + return [] + + def get_current_state_deltas_for_rooms_txn( txn: LoggingTransaction, + room_ids: StrCollection, ) -> List[StateDelta]: - sql = """ - SELECT instance_name, stream_id, type, state_key, event_id, prev_event_id + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", room_ids + ) + + sql = f""" + SELECT instance_name, stream_id, room_id, type, state_key, event_id, prev_event_id FROM current_state_delta_stream - WHERE room_id = ? AND ? < stream_id AND stream_id <= ? + WHERE {clause} AND ? < stream_id AND stream_id <= ? ORDER BY stream_id ASC """ - txn.execute( - sql, (room_id, from_token.stream, to_token.get_max_stream_pos()) - ) + args.append(from_token.stream) + args.append(to_token.get_max_stream_pos()) + + txn.execute(sql, args) return [ StateDelta( stream_id=row[1], - room_id=room_id, - event_type=row[2], - state_key=row[3], - event_id=row[4], - prev_event_id=row[5], + room_id=row[2], + event_type=row[3], + state_key=row[4], + event_id=row[5], + prev_event_id=row[6], ) for row in txn if _filter_results_by_stream(from_token, to_token, row[0], row[1]) ] - return await self.db_pool.runInteraction( - "get_current_state_deltas_for_room", get_current_state_deltas_for_room_txn - ) + results = [] + for batch in batch_iter(room_ids, 1000): + deltas = await self.db_pool.runInteraction( + "get_current_state_deltas_for_rooms", + get_current_state_deltas_for_rooms_txn, + batch, + ) + + results.extend(deltas) + + return results diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index e9f6a918c7..79c49e7fd9 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py
@@ -161,7 +161,7 @@ class StatsStore(StateDeltasStore): LIMIT ? """ txn.execute(sql, (last_user_id, batch_size)) - return [r for r, in txn] + return [r for (r,) in txn] users_to_work_on = await self.db_pool.runInteraction( "_populate_stats_process_users", _get_next_batch @@ -207,7 +207,7 @@ class StatsStore(StateDeltasStore): LIMIT ? """ txn.execute(sql, (last_room_id, batch_size)) - return [r for r, in txn] + return [r for (r,) in txn] rooms_to_work_on = await self.db_pool.runInteraction( "populate_stats_rooms_get_batch", _get_next_batch @@ -751,9 +751,7 @@ class StatsStore(StateDeltasStore): LEFT JOIN profiles AS p ON lmr.user_id = p.full_user_id {} GROUP BY lmr.user_id, displayname - """.format( - where_clause - ) + """.format(where_clause) # SQLite does not support SELECT COUNT(*) OVER() sql = """ diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 4989c960a6..3fda49f31f 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py
@@ -21,7 +21,7 @@ # # -""" This module is responsible for getting events from the DB for pagination +"""This module is responsible for getting events from the DB for pagination and event streaming. The order it returns events in depend on whether we are streaming forwards or @@ -50,6 +50,8 @@ from typing import ( Dict, Iterable, List, + Literal, + Mapping, Optional, Protocol, Set, @@ -60,7 +62,7 @@ from typing import ( import attr from immutabledict import immutabledict -from typing_extensions import Literal, assert_never +from typing_extensions import assert_never from twisted.internet import defer @@ -78,9 +80,10 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine +from synapse.storage.roommember import RoomsForUserStateReset from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import PersistedEventPosition, RoomStreamToken, StrCollection -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable from synapse.util.iterutils import batch_iter @@ -107,7 +110,7 @@ class PaginateFunction(Protocol): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken]: ... + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: ... # Used as return values for pagination APIs @@ -451,6 +454,8 @@ def _filter_results_by_stream( stream_ordering falls between the two tokens (taking a None token to mean unbounded). + The token range is defined by > `lower_token` and <= `upper_token`. + Used to filter results from fetching events in the DB against the given tokens. This is necessary to handle the case where the tokens include position maps, which we handle by fetching more than necessary from the DB @@ -678,7 +683,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]: + ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken, bool]]: """Get new room events in stream ordering since `from_key`. Args: @@ -694,6 +699,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): A map from room id to a tuple containing: - list of recent events in the room - stream ordering key for the start of the chunk of events returned. + - a boolean to indicate if there were more events but we hit the limit When Direction.FORWARDS: from_key < x <= to_key, (ascending order) When Direction.BACKWARDS: from_key >= x > to_key, (descending order) @@ -749,6 +755,48 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if self._events_stream_cache.has_entity_changed(room_id, from_id) } + async def get_rooms_that_have_updates_since_sliding_sync_table( + self, + room_ids: StrCollection, + from_key: RoomStreamToken, + ) -> StrCollection: + """Return the rooms that probably have had updates since the given + token (changes that are > `from_key`).""" + # If the stream change cache is valid for the stream token, we can just + # use the result of that. 
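# A minimal, self-contained sketch of the fast path used here (illustrative
# names, not Synapse's actual StreamChangeCache): the cache can only answer
# "which entities changed after position X?" when X is at or after the
# earliest position it has observed, hence the guard on
# `get_earliest_known_position()` before trusting its answer.
from typing import Dict, Set

class TinyStreamChangeCache:
    def __init__(self, earliest_known: int) -> None:
        self._earliest_known = earliest_known  # lowest position we have data for
        self._changes: Dict[str, int] = {}     # entity -> last change position

    def note_change(self, entity: str, pos: int) -> None:
        self._changes[entity] = max(pos, self._changes.get(entity, 0))

    def get_earliest_known_position(self) -> int:
        return self._earliest_known

    def get_entities_changed(self, entities: Set[str], after: int) -> Set[str]:
        # Only valid when `after >= get_earliest_known_position()`.
        return {e for e in entities if self._changes.get(e, 0) > after}

cache = TinyStreamChangeCache(earliest_known=100)
cache.note_change("!room:a", 150)
if 120 >= cache.get_earliest_known_position():
    assert cache.get_entities_changed({"!room:a", "!room:b"}, 120) == {"!room:a"}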
+ if from_key.stream >= self._events_stream_cache.get_earliest_known_position(): + return self._events_stream_cache.get_entities_changed( + room_ids, from_key.stream + ) + + def get_rooms_that_have_updates_since_sliding_sync_table_txn( + txn: LoggingTransaction, + ) -> StrCollection: + sql = """ + SELECT room_id + FROM sliding_sync_joined_rooms + WHERE {clause} + AND event_stream_ordering > ? + """ + + results: Set[str] = set() + for batch in batch_iter(room_ids, 1000): + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", batch + ) + + args.append(from_key.stream) + txn.execute(sql.format(clause=clause), args) + + results.update(row[0] for row in txn) + + return results + + return await self.db_pool.runInteraction( + "get_rooms_that_have_updates_since_sliding_sync_table", + get_rooms_that_have_updates_since_sliding_sync_table_txn, + ) + async def paginate_room_events_by_stream_ordering( self, *, @@ -757,7 +805,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: """ Paginate events by `stream_ordering` in the room from the `from_key` in the given `direction` to the `to_key` or `limit`. @@ -772,8 +820,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): limit: Maximum number of events to return Returns: - The results as a list of events and a token that points to the end - of the result set. If no events are returned then the end of the + The results as a list of events, a token that points to the end of + the result set, and a boolean to indicate if there were more events + but we hit the limit. If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). @@ -797,7 +846,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and to_key.is_before_or_eq(from_key) ): # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # Or vice-versa, if we're looking backwards and our `from_key` is already before # our `to_key`. elif ( @@ -806,7 +855,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and from_key.is_before_or_eq(to_key) ): # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # We can do a quick sanity check to see if any events have been sent in the room # since the earlier token. @@ -825,7 +874,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if not has_changed: # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False order, from_bound, to_bound = generate_pagination_bounds( direction, from_key, to_key @@ -841,7 +890,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): engine=self.database_engine, ) - def f(txn: LoggingTransaction) -> List[_EventDictReturn]: + def f(txn: LoggingTransaction) -> Tuple[List[_EventDictReturn], bool]: sql = f""" SELECT event_id, instance_name, stream_ordering FROM events @@ -853,9 +902,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ txn.execute(sql, (room_id, 2 * limit)) + # Get all the rows and check if we hit the limit. 
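# A hedged sketch (illustrative, not Synapse code) of the over-fetch pattern
# used in this function: the query asks for up to 2 * limit rows, and we
# report `limited=True` either when the DB returned a full batch (more rows
# may exist beyond it) or when token filtering still left more than `limit`.
from typing import List, Tuple

def take_with_limited(filtered: List[int], limit: int, fetched: int) -> Tuple[List[int], bool]:
    limited = fetched >= 2 * limit   # the query hit its cap: more rows may exist
    if len(filtered) > limit:        # filtering still left too many rows
        limited = True
    return filtered[:limit], limited

rows, limited = take_with_limited(list(range(7)), limit=5, fetched=7)
assert rows == [0, 1, 2, 3, 4] and limited is True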
+            fetched_rows = txn.fetchall()
+            limited = len(fetched_rows) >= 2 * limit
+
             rows = [
                 _EventDictReturn(event_id, None, stream_ordering)
-                for event_id, instance_name, stream_ordering in txn
+                for event_id, instance_name, stream_ordering in fetched_rows
                 if _filter_results_by_stream(
                     lower_token=(
                         to_key if direction == Direction.BACKWARDS else from_key
                     ),
@@ -866,10 +919,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     instance_name=instance_name,
                     stream_ordering=stream_ordering,
                 )
-            ][:limit]
-            return rows
+            ]
+
+            if len(rows) > limit:
+                limited = True
-        rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f)
+            rows = rows[:limit]
+            return rows, limited
+
+        rows, limited = await self.db_pool.runInteraction(
+            "get_room_events_stream_for_room", f
+        )
 
         ret = await self.get_events_as_list(
             [r.event_id for r in rows], get_prev_content=True
@@ -886,7 +946,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         # `_paginate_room_events_by_topological_ordering_txn(...)`)
         next_key = to_key if to_key else from_key
 
-        return ret, next_key
+        return ret, next_key, limited
 
     @trace
     async def get_current_state_delta_membership_changes_for_user(
@@ -927,7 +987,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         Returns:
             All membership changes to the current state in the token range.
             Events are sorted by `stream_ordering` ascending.
+
+            `event_id`/`sender` can be `None` when the server leaves a room (meaning
+            everyone locally left) or a state reset which removed the person from the
+            room. We can't tell the difference between the two cases with what's
+            available in the `current_state_delta_stream` table. To actually check for a
+            state reset, you need to check if a membership still exists in the room.
         """
+
+        assert from_key.topological is None
+        assert to_key.topological is None
+
         # Start by ruling out cases where a DB query is not necessary.
         if from_key == to_key:
             return []
@@ -1038,6 +1108,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                     membership=(
                         membership if membership is not None else Membership.LEAVE
                     ),
+                    # This will also be null for the same reasons if `s.event_id = null`
                    sender=sender,
                    # Prev event
                    prev_event_id=prev_event_id,
@@ -1072,6 +1143,203 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             if membership_change.room_id not in room_ids_to_exclude
         ]
 
+    @trace
+    async def get_sliding_sync_membership_changes(
+        self,
+        user_id: str,
+        from_key: RoomStreamToken,
+        to_key: RoomStreamToken,
+        excluded_room_ids: Optional[AbstractSet[str]] = None,
+    ) -> Dict[str, RoomsForUserStateReset]:
+        """
+        Fetch membership events that result in a meaningful membership change for a
+        given user.
+
+        A meaningful membership change is one where the `membership` value actually
+        changes. This means membership changes from `join` to `join` (like a display
+        name change) will be filtered out since they result in no meaningful change.
+
+        Note: This function only works with "live" tokens with `stream_ordering` only.
+
+        We're looking for membership changes in the token range (> `from_key` and <=
+        `to_key`).
+
+        Args:
+            user_id: The user ID to fetch membership events for.
+            from_key: The point in the stream to sync from (fetching events > this point).
+            to_key: The token to fetch rooms up to (fetching events <= this point).
+            excluded_room_ids: Optional list of room IDs to exclude from the results.
+
+        Returns:
+            All meaningful membership changes to the current state in the token range.
+ Events are sorted by `stream_ordering` ascending. + + `event_id`/`sender` can be `None` when the server leaves a room (meaning + everyone locally left) or a state reset which removed the person from the + room. We can't tell the difference between the two cases with what's + available in the `current_state_delta_stream` table. To actually check for a + state reset, you need to check if a membership still exists in the room. + """ + + assert from_key.topological is None + assert to_key.topological is None + + # Start by ruling out cases where a DB query is not necessary. + if from_key == to_key: + return {} + + if from_key: + has_changed = self._membership_stream_cache.has_entity_changed( + user_id, int(from_key.stream) + ) + if not has_changed: + return {} + + room_ids_to_exclude: AbstractSet[str] = set() + if excluded_room_ids is not None: + room_ids_to_exclude = excluded_room_ids + + def f(txn: LoggingTransaction) -> Dict[str, RoomsForUserStateReset]: + # To handle tokens with a non-empty instance_map we fetch more + # results than necessary and then filter down + min_from_id = from_key.stream + max_to_id = to_key.get_max_stream_pos() + + # This query looks at membership changes in + # `sliding_sync_membership_snapshots` which will not include users + # that were state reset out of rooms; so we need to look for that + # case in `current_state_delta_stream`. + sql = """ + SELECT + room_id, + membership_event_id, + event_instance_name, + event_stream_ordering, + membership, + sender, + prev_membership, + room_version + FROM + ( + SELECT + s.room_id, + s.membership_event_id, + s.event_instance_name, + s.event_stream_ordering, + s.membership, + s.sender, + m_prev.membership AS prev_membership + FROM sliding_sync_membership_snapshots as s + LEFT JOIN event_edges AS e ON e.event_id = s.membership_event_id + LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = e.prev_event_id + WHERE s.user_id = ? + + UNION ALL + + SELECT + s.room_id, + e.event_id, + s.instance_name, + s.stream_id, + m.membership, + e.sender, + m_prev.membership AS prev_membership + FROM current_state_delta_stream AS s + LEFT JOIN events AS e ON e.event_id = s.event_id + LEFT JOIN room_memberships AS m ON m.event_id = s.event_id + LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = s.prev_event_id + WHERE + s.type = ? + AND s.state_key = ? + ) AS c + INNER JOIN rooms USING (room_id) + WHERE event_stream_ordering > ? AND event_stream_ordering <= ? + ORDER BY event_stream_ordering ASC + """ + + txn.execute( + sql, + (user_id, EventTypes.Member, user_id, min_from_id, max_to_id), + ) + + membership_changes: Dict[str, RoomsForUserStateReset] = {} + for ( + room_id, + membership_event_id, + event_instance_name, + event_stream_ordering, + membership, + sender, + prev_membership, + room_version_id, + ) in txn: + assert room_id is not None + assert event_stream_ordering is not None + + if room_id in room_ids_to_exclude: + continue + + if _filter_results_by_stream( + from_key, + to_key, + event_instance_name, + event_stream_ordering, + ): + # When the server leaves a room, it will insert new rows into the + # `current_state_delta_stream` table with `event_id = null` for all + # current state. This means we might already have a row for the + # leave event and then another for the same leave where the + # `event_id=null` but the `prev_event_id` is pointing back at the + # earlier leave event. We don't want to report the leave, if we + # already have a leave event. 
+                    if (
+                        membership_event_id is None
+                        and prev_membership == Membership.LEAVE
+                    ):
+                        continue
+
+                    if membership_event_id is None and room_id in membership_changes:
+                        # As above, if we've already recorded a membership change for
+                        # this room in this window, a subsequent `event_id = null` row
+                        # duplicates information we already have, so skip it.
+                        continue
+
+                    # When `s.event_id = null`, we won't be able to get respective
+                    # `room_membership` but can assume the user has left the room
+                    # because this only happens when the server leaves a room
+                    # (meaning everyone locally left) or a state reset which removed
+                    # the person from the room.
+                    membership = (
+                        membership if membership is not None else Membership.LEAVE
+                    )
+
+                    if membership == prev_membership:
+                        # If `membership` and `prev_membership` are the same then this
+                        # is not a meaningful change so we can skip it.
+                        # An example of this happening is when the user changes their display name.
+                        continue
+
+                    membership_change = RoomsForUserStateReset(
+                        room_id=room_id,
+                        sender=sender,
+                        membership=membership,
+                        event_id=membership_event_id,
+                        event_pos=PersistedEventPosition(
+                            event_instance_name, event_stream_ordering
+                        ),
+                        room_version_id=room_version_id,
+                    )
+
+                    membership_changes[room_id] = membership_change
+
+            return membership_changes
+
+        membership_changes = await self.db_pool.runInteraction(
+            "get_sliding_sync_membership_changes", f
+        )
+
+        return membership_changes
+
     @cancellable
     async def get_membership_changes_for_user(
         self,
@@ -1121,9 +1389,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
                 AND e.stream_ordering > ? AND e.stream_ordering <= ?
                 %s
                 ORDER BY e.stream_ordering ASC
-            """ % (
-                ignore_room_clause,
-            )
+            """ % (ignore_room_clause,)
 
             txn.execute(sql, args)
 
@@ -1192,7 +1458,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         if limit == 0:
             return [], end_token
 
-        rows, token = await self.db_pool.runInteraction(
+        rows, token, _ = await self.db_pool.runInteraction(
            "get_recent_event_ids_for_room",
            self._paginate_room_events_by_topological_ordering_txn,
            room_id,
@@ -1263,12 +1529,76 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         return None
 
+    async def get_last_event_pos_in_room(
+        self,
+        room_id: str,
+        event_types: Optional[StrCollection] = None,
+    ) -> Optional[Tuple[str, PersistedEventPosition]]:
+        """
+        Returns the ID and event position of the last event in a room.
+
+        Based on `get_last_event_pos_in_room_before_stream_ordering(...)`
+
+        Args:
+            room_id
+            event_types: Optional allowlist of event types to filter by
+
+        Returns:
+            The ID of the most recent event and its position, or None if there are no
+            events in the room that match the given event types.
+        """
+
+        def _get_last_event_pos_in_room_txn(
+            txn: LoggingTransaction,
+        ) -> Optional[Tuple[str, PersistedEventPosition]]:
+            event_type_clause = ""
+            event_type_args: List[str] = []
+            if event_types is not None and len(event_types) > 0:
+                event_type_clause, event_type_args = make_in_list_sql_clause(
+                    txn.database_engine, "type", event_types
+                )
+                event_type_clause = f"AND {event_type_clause}"
+
+            sql = f"""
+            SELECT event_id, stream_ordering, instance_name
+            FROM events
+            LEFT JOIN rejections USING (event_id)
+            WHERE room_id = ?
+ {event_type_clause} + AND NOT outlier + AND rejections.event_id IS NULL + ORDER BY stream_ordering DESC + LIMIT 1 + """ + + txn.execute( + sql, + [room_id] + event_type_args, + ) + + row = cast(Optional[Tuple[str, int, str]], txn.fetchone()) + if row is not None: + event_id, stream_ordering, instance_name = row + + return event_id, PersistedEventPosition( + # If instance_name is null we default to "master" + instance_name or "master", + stream_ordering, + ) + + return None + + return await self.db_pool.runInteraction( + "get_last_event_pos_in_room", + _get_last_event_pos_in_room_txn, + ) + @trace async def get_last_event_pos_in_room_before_stream_ordering( self, room_id: str, end_token: RoomStreamToken, - event_types: Optional[Collection[str]] = None, + event_types: Optional[StrCollection] = None, ) -> Optional[Tuple[str, PersistedEventPosition]]: """ Returns the ID and event position of the last event in a room at or before a @@ -1381,8 +1711,56 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): rooms """ + # First we just get the latest positions for the room, as the vast + # majority of them will be before the given end token anyway. By doing + # this we can cache most rooms. + uncapped_results = await self._bulk_get_max_event_pos(room_ids) + + # Check that the stream position for the rooms are from before the + # minimum position of the token. If not then we need to fetch more + # rows. + results: Dict[str, int] = {} + recheck_rooms: Set[str] = set() min_token = end_token.stream - max_token = end_token.get_max_stream_pos() + for room_id, stream in uncapped_results.items(): + if stream is None: + # Despite the function not directly setting None, the cache can! + # See: https://github.com/element-hq/synapse/issues/17726 + continue + if stream <= min_token: + results[room_id] = stream + else: + recheck_rooms.add(room_id) + + if not recheck_rooms: + return results + + # There shouldn't be many rooms that we need to recheck, so we do them + # one-by-one. + for room_id in recheck_rooms: + result = await self.get_last_event_pos_in_room_before_stream_ordering( + room_id, end_token + ) + if result is not None: + results[room_id] = result[1].stream + + return results + + @cached() + async def _get_max_event_pos(self, room_id: str) -> int: + raise NotImplementedError() + + @cachedList(cached_method_name="_get_max_event_pos", list_name="room_ids") + async def _bulk_get_max_event_pos( + self, room_ids: StrCollection + ) -> Mapping[str, Optional[int]]: + """Fetch the max position of a persisted event in the room.""" + + # We need to be careful not to return positions ahead of the current + # positions, so we get the current token now and cap our queries to it. + now_token = self.get_room_max_token() + max_pos = now_token.get_max_stream_pos() + results: Dict[str, int] = {} # First, we check for the rooms in the stream change cache to see if we @@ -1390,31 +1768,32 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): missing_room_ids: Set[str] = set() for room_id in room_ids: stream_pos = self._events_stream_cache.get_max_pos_of_last_change(room_id) - if stream_pos and stream_pos <= min_token: + if stream_pos is not None: results[room_id] = stream_pos else: missing_room_ids.add(room_id) + if not missing_room_ids: + return results + # Next, we query the stream position from the DB. At first we fetch all # positions less than the *max* stream pos in the token, then filter # them down. 
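# A hedged sketch of the `@cached` / `@cachedList` pairing used for
# `_get_max_event_pos` above: the single-key method is a stub that exists to
# own the cache, while the batch method fills it and may hand back `None` for
# rooms it has no row for (the situation the issue link above describes).
# Toy code with illustrative names, not Synapse's descriptor machinery.
from typing import Dict, Iterable, Optional

class MaxPosCache:
    def __init__(self, db: Dict[str, int]) -> None:
        self._db = db
        self._cache: Dict[str, Optional[int]] = {}

    def bulk_get_max_event_pos(self, room_ids: Iterable[str]) -> Dict[str, Optional[int]]:
        missing = [r for r in room_ids if r not in self._cache]
        for room_id in missing:            # one batched query in the real store
            self._cache[room_id] = self._db.get(room_id)
        return {r: self._cache[r] for r in room_ids}

cache = MaxPosCache({"!a:x": 10})
assert cache.bulk_get_max_event_pos(["!a:x", "!b:x"]) == {"!a:x": 10, "!b:x": None}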
We do this as a) this is a cheaper query, and b) the vast # majority of rooms will have a latest token from before the min stream # pos. - def bulk_get_last_event_pos_txn( - txn: LoggingTransaction, batch_room_ids: StrCollection + def bulk_get_max_event_pos_fallback_txn( + txn: LoggingTransaction, batched_room_ids: StrCollection ) -> Dict[str, int]: - # This query fetches the latest stream position in the rooms before - # the given max position. clause, args = make_in_list_sql_clause( - self.database_engine, "room_id", batch_room_ids + self.database_engine, "room_id", batched_room_ids ) sql = f""" SELECT room_id, ( SELECT stream_ordering FROM events AS e LEFT JOIN rejections USING (event_id) WHERE e.room_id = r.room_id - AND stream_ordering <= ? + AND e.stream_ordering <= ? AND NOT outlier AND rejection_reason IS NULL ORDER BY stream_ordering DESC @@ -1423,72 +1802,55 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): FROM rooms AS r WHERE {clause} """ - txn.execute(sql, [max_token] + args) + txn.execute(sql, [max_pos] + args) return {row[0]: row[1] for row in txn} - recheck_rooms: Set[str] = set() - for batched in batch_iter(missing_room_ids, 1000): - result = await self.db_pool.runInteraction( - "bulk_get_last_event_pos_in_room_before_stream_ordering", - bulk_get_last_event_pos_txn, - batched, - ) - - # Check that the stream position for the rooms are from before the - # minimum position of the token. If not then we need to fetch more - # rows. - for room_id, stream in result.items(): - if stream <= min_token: - results[room_id] = stream - else: - recheck_rooms.add(room_id) - - if not recheck_rooms: - return results - - # For the remaining rooms we need to fetch all rows between the min and - # max stream positions in the end token, and filter out the rows that - # are after the end token. - # - # This query should be fast as the range between the min and max should - # be small. - - def bulk_get_last_event_pos_recheck_txn( - txn: LoggingTransaction, batch_room_ids: StrCollection + # It's easier to look at the `sliding_sync_joined_rooms` table and avoid all of + # the joins and sub-queries. + def bulk_get_max_event_pos_from_sliding_sync_tables_txn( + txn: LoggingTransaction, batched_room_ids: StrCollection ) -> Dict[str, int]: clause, args = make_in_list_sql_clause( - self.database_engine, "room_id", batch_room_ids + self.database_engine, "room_id", batched_room_ids ) sql = f""" - SELECT room_id, instance_name, stream_ordering - FROM events - WHERE ? < stream_ordering AND stream_ordering <= ? - AND NOT outlier - AND rejection_reason IS NULL - AND {clause} - ORDER BY stream_ordering ASC + SELECT room_id, event_stream_ordering + FROM sliding_sync_joined_rooms + WHERE {clause} + ORDER BY event_stream_ordering DESC """ - txn.execute(sql, [min_token, max_token] + args) - - # We take the max stream ordering that is less than the token. Since - # we ordered by stream ordering we just need to iterate through and - # take the last matching stream ordering. 
- txn_results: Dict[str, int] = {} - for row in txn: - room_id = row[0] - event_pos = PersistedEventPosition(row[1], row[2]) - if not event_pos.persisted_after(end_token): - txn_results[room_id] = event_pos.stream - - return txn_results - - for batched in batch_iter(recheck_rooms, 1000): - recheck_result = await self.db_pool.runInteraction( - "bulk_get_last_event_pos_in_room_before_stream_ordering_recheck", - bulk_get_last_event_pos_recheck_txn, - batched, + txn.execute(sql, args) + return {row[0]: row[1] for row in txn} + + recheck_rooms: Set[str] = set() + for batched in batch_iter(room_ids, 1000): + if await self.have_finished_sliding_sync_background_jobs(): + batch_results = await self.db_pool.runInteraction( + "bulk_get_max_event_pos_from_sliding_sync_tables_txn", + bulk_get_max_event_pos_from_sliding_sync_tables_txn, + batched, + ) + else: + batch_results = await self.db_pool.runInteraction( + "bulk_get_max_event_pos_fallback_txn", + bulk_get_max_event_pos_fallback_txn, + batched, + ) + for room_id, stream_ordering in batch_results.items(): + if stream_ordering <= now_token.stream: + results[room_id] = stream_ordering + else: + recheck_rooms.add(room_id) + + # We now need to handle rooms where the above query returned a stream + # position that was potentially too new. This should happen very rarely + # so we just query the rooms one-by-one + for room_id in recheck_rooms: + result = await self.get_last_event_pos_in_room_before_stream_ordering( + room_id, now_token ) - results.update(recheck_result) + if result is not None: + results[room_id] = result[1].stream return results @@ -1680,15 +2042,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): dict """ - stream_ordering, topological_ordering = cast( - Tuple[int, int], - self.db_pool.simple_select_one_txn( - txn, - "events", - keyvalues={"event_id": event_id, "room_id": room_id}, - retcols=["stream_ordering", "topological_ordering"], - ), + row = self.db_pool.simple_select_one_txn( + txn, + "events", + keyvalues={"event_id": event_id, "room_id": room_id}, + retcols=("stream_ordering", "topological_ordering"), ) + stream_ordering = int(row[0]) + topological_ordering = int(row[1]) # Paginating backwards includes the event at the token, but paginating # forward doesn't. @@ -1700,7 +2061,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological=topological_ordering, stream=stream_ordering ) - rows, start_token = self._paginate_room_events_by_topological_ordering_txn( + rows, start_token, _ = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, before_token, @@ -1710,7 +2071,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) events_before = [r.event_id for r in rows] - rows, end_token = self._paginate_room_events_by_topological_ordering_txn( + rows, end_token, _ = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, after_token, @@ -1882,7 +2243,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: + ) -> Tuple[List[_EventDictReturn], RoomStreamToken, bool]: """Returns list of events before or after a given token. Args: @@ -1897,10 +2258,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): those that match the filter. Returns: - A list of _EventDictReturn and a token that points to the end of the - result set. If no events are returned then the end of the stream has - been reached (i.e. 
there are no events between `from_token` and - `to_token`), or `limit` is zero. + A list of _EventDictReturn, a token that points to the end of the + result set, and a boolean to indicate if there were more events but + we hit the limit. If no events are returned then the end of the + stream has been reached (i.e. there are no events between + `from_token` and `to_token`), or `limit` is zero. """ # We can bail early if we're looking forwards, and our `to_key` is already # before our `from_token`. @@ -1910,7 +2272,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and to_token.is_before_or_eq(from_token) ): # Token selection matches what we do below if there are no rows - return [], to_token if to_token else from_token + return [], to_token if to_token else from_token, False # Or vice-versa, if we're looking backwards and our `from_token` is already before # our `to_token`. elif ( @@ -1919,7 +2281,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and from_token.is_before_or_eq(to_token) ): # Token selection matches what we do below if there are no rows - return [], to_token if to_token else from_token + return [], to_token if to_token else from_token, False args: List[Any] = [room_id] @@ -1942,6 +2304,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): args.extend(filter_args) # We fetch more events as we'll filter the result set + requested_limit = int(limit) * 2 args.append(int(limit) * 2) select_keywords = "SELECT" @@ -2006,10 +2369,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): } txn.execute(sql, args) + # Get all the rows and check if we hit the limit. + fetched_rows = txn.fetchall() + limited = len(fetched_rows) >= requested_limit + # Filter the result set. rows = [ _EventDictReturn(event_id, topological_ordering, stream_ordering) - for event_id, instance_name, topological_ordering, stream_ordering in txn + for event_id, instance_name, topological_ordering, stream_ordering in fetched_rows if _filter_results( lower_token=( to_token if direction == Direction.BACKWARDS else from_token @@ -2021,7 +2388,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological_ordering=topological_ordering, stream_ordering=stream_ordering, ) - ][:limit] + ] + + if len(rows) > limit: + limited = True + + rows = rows[:limit] if rows: assert rows[-1].topological_ordering is not None @@ -2032,7 +2404,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # TODO (erikj): We should work out what to do here instead. next_token = to_token if to_token else from_token - return rows, next_token + return rows, next_token, limited @trace @tag_args @@ -2045,7 +2417,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: """ Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in the room from the `from_key` in the given `direction` to the `to_key` or @@ -2062,8 +2434,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_filter: If provided filters the events to those that match the filter. Returns: - The results as a list of events and a token that points to the end - of the result set. If no events are returned then the end of the + The results as a list of events, a token that points to the end of + the result set, and a boolean to indicate if there were more events + but we hit the limit. 
If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). @@ -2087,7 +2460,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ): # Token selection matches what we do in `_paginate_room_events_txn` if there # are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # Or vice-versa, if we're looking backwards and our `from_key` is already before # our `to_key`. elif ( @@ -2097,9 +2470,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ): # Token selection matches what we do in `_paginate_room_events_txn` if there # are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False - rows, token = await self.db_pool.runInteraction( + rows, token, limited = await self.db_pool.runInteraction( "paginate_room_events_by_topological_ordering", self._paginate_room_events_by_topological_ordering_txn, room_id, @@ -2114,7 +2487,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [r.event_id for r in rows], get_prev_content=True ) - return events, token + return events, token, limited @cached() async def get_id_for_instance(self, instance_name: str) -> int: diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py
index b5af294384..97b190bccc 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py
@@ -158,9 +158,56 @@ class TagsWorkerStore(AccountDataWorkerStore):
 
         return results
 
+    async def has_tags_changed_for_room(
+        self,
+        # Since there are multiple arguments with the same type, force keyword arguments
+        # so people don't accidentally swap the order
+        *,
+        user_id: str,
+        room_id: str,
+        from_stream_id: int,
+        to_stream_id: int,
+    ) -> bool:
+        """Check if the user's tags for a room have been updated in the token range
+
+        (> `from_stream_id` and <= `to_stream_id`)
+
+        Args:
+            user_id: The user to get tags for
+            room_id: The room to get tags for
+            from_stream_id: The point in the stream to fetch from
+            to_stream_id: The point in the stream to fetch to
+
+        Returns:
+            True if the user's tags changed in the room in the given token range.
+        """
+
+        # Shortcut if no room has changed for the user
+        changed = self._account_data_stream_cache.has_entity_changed(
+            user_id, int(from_stream_id)
+        )
+        if not changed:
+            return False
+
+        last_change_position_for_room = await self.db_pool.simple_select_one_onecol(
+            table="room_tags_revisions",
+            keyvalues={"user_id": user_id, "room_id": room_id},
+            retcol="stream_id",
+            allow_none=True,
+        )
+
+        if last_change_position_for_room is None:
+            return False
+
+        return (
+            last_change_position_for_room > from_stream_id
+            and last_change_position_for_room <= to_stream_id
+        )
+
+    @cached(num_args=2, tree=True)
     async def get_tags_for_room(
         self, user_id: str, room_id: str
-    ) -> Dict[str, JsonDict]:
+    ) -> Mapping[str, JsonMapping]:
         """Get all the tags for the given room
 
         Args:
@@ -182,7 +229,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         return {tag: db_to_json(content) for tag, content in rows}
 
     async def add_tag_to_room(
-        self, user_id: str, room_id: str, tag: str, content: JsonDict
+        self, user_id: str, room_id: str, tag: str, content: JsonMapping
     ) -> int:
         """Add a tag to a room for a user.
 
@@ -213,6 +260,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
 
         await self.db_pool.runInteraction("add_tag", add_tag_txn, next_id)
         self.get_tags_for_user.invalidate((user_id,))
+        self.get_tags_for_room.invalidate((user_id, room_id))
 
         return self._account_data_id_gen.get_current_token()
 
@@ -226,10 +274,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
         assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator)
 
         def remove_tag_txn(txn: LoggingTransaction, next_id: int) -> None:
-            sql = (
-                "DELETE FROM room_tags "
-                " WHERE user_id = ? AND room_id = ? AND tag = ?"
-            )
+            sql = "DELETE FROM room_tags WHERE user_id = ? AND room_id = ? AND tag = ?"
txn.execute(sql, (user_id, room_id, tag)) self._update_revision_txn(txn, user_id, room_id, next_id) @@ -237,6 +282,7 @@ class TagsWorkerStore(AccountDataWorkerStore): await self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) + self.get_tags_for_room.invalidate((user_id, room_id)) return self._account_data_id_gen.get_current_token() @@ -290,9 +336,19 @@ class TagsWorkerStore(AccountDataWorkerStore): rows: Iterable[Any], ) -> None: if stream_name == AccountDataStream.NAME: - for row in rows: + # Cast is safe because the `AccountDataStream` should only be giving us + # `AccountDataStreamRow` + account_data_stream_rows: List[AccountDataStream.AccountDataStreamRow] = ( + cast(List[AccountDataStream.AccountDataStreamRow], rows) + ) + + for row in account_data_stream_rows: if row.data_type == AccountDataTypes.TAG: self.get_tags_for_user.invalidate((row.user_id,)) + if row.room_id: + self.get_tags_for_room.invalidate((row.user_id, row.room_id)) + else: + self.get_tags_for_room.invalidate((row.user_id,)) self._account_data_stream_cache.entity_has_changed( row.user_id, token ) diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 770802483c..bfc324b80d 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py
@@ -86,10 +86,10 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore): @wrap_as_background_process("cleanup_transactions") async def _cleanup_transactions(self) -> None: now = self._clock.time_msec() - month_ago = now - 30 * 24 * 60 * 60 * 1000 + day_ago = now - 24 * 60 * 60 * 1000 def _cleanup_transactions_txn(txn: LoggingTransaction) -> None: - txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) + txn.execute("DELETE FROM received_transactions WHERE ts < ?", (day_ago,)) await self.db_pool.runInteraction( "_cleanup_transactions", _cleanup_transactions_txn diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 6e18f714d7..31a8ce6666 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py
@@ -31,6 +31,7 @@ from typing import (
     Sequence,
     Set,
     Tuple,
+    TypedDict,
     cast,
 )
 
@@ -42,10 +43,8 @@
 try:
     import icu
 
     USE_ICU = True
 except ModuleNotFoundError:
     USE_ICU = False
 
-from typing_extensions import TypedDict
-
 from synapse.api.errors import StoreError
 from synapse.util.stringutils import non_null_str_or_none
 
@@ -224,9 +224,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
                 SELECT room_id, events FROM %s
                 ORDER BY events DESC
                 LIMIT 250
-            """ % (
-                TEMP_TABLE + "_rooms",
-            )
+            """ % (TEMP_TABLE + "_rooms",)
             txn.execute(sql)
             rooms_to_work_on = cast(List[Tuple[str, int]], txn.fetchall())
 
@@ -585,9 +583,9 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             retry_counter: number of failures in refreshing the profile so far. Used for
                 exponential backoff calculations.
         """
-        assert not self.hs.is_mine_id(
-            user_id
-        ), "Can't mark a local user as a stale remote user."
+        assert not self.hs.is_mine_id(user_id), (
+            "Can't mark a local user as a stale remote user."
+        )
 
         server_name = UserID.from_string(user_id).domain
 
@@ -1040,11 +1038,11 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
             }
         """
 
+        join_args: Tuple[str, ...] = (user_id,)
+
         if self.hs.config.userdirectory.user_directory_search_all_users:
-            join_args = (user_id,)
             where_clause = "user_id != ?"
         else:
-            join_args = (user_id,)
             where_clause = """
                 (
                     EXISTS (select 1 from users_in_public_rooms WHERE user_id = t.user_id)
@@ -1058,6 +1056,14 @@
         if not show_locked_users:
             where_clause += " AND (u.locked IS NULL OR u.locked = FALSE)"
 
+        # Adjust the JOIN type based on the exclude_remote_users flag (the
+        # users table only contains local users so an inner join is a good
+        # way to exclude remote users)
+        if self.hs.config.userdirectory.user_directory_exclude_remote_users:
+            join_type = "JOIN"
+        else:
+            join_type = "LEFT JOIN"
+
         # We allow manipulating the ranking algorithm by injecting statements
         # based on config options.
         additional_ordering_statements = []
@@ -1089,7 +1095,7 @@
                 SELECT d.user_id AS user_id, display_name, avatar_url
                 FROM matching_users as t
                 INNER JOIN user_directory AS d USING (user_id)
-                LEFT JOIN users AS u ON t.user_id = u.name
+                %(join_type)s users AS u ON t.user_id = u.name
                 WHERE
                     %(where_clause)s
                 ORDER BY
@@ -1118,6 +1124,7 @@
             """ % {
                 "where_clause": where_clause,
                 "order_case_statements": " ".join(additional_ordering_statements),
+                "join_type": join_type,
             }
             args = (
                 (full_query,)
@@ -1145,7 +1152,7 @@
                 SELECT d.user_id AS user_id, display_name, avatar_url
                 FROM user_directory_search as t
                 INNER JOIN user_directory AS d USING (user_id)
-                LEFT JOIN users AS u ON t.user_id = u.name
+                %(join_type)s users AS u ON t.user_id = u.name
                 WHERE
                     %(where_clause)s
                     AND value MATCH ?
@@ -1158,6 +1165,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): """ % { "where_clause": where_clause, "order_statements": " ".join(additional_ordering_statements), + "join_type": join_type, } args = join_args + (search_query,) + ordering_arguments + (limit + 1,) else: @@ -1240,7 +1248,13 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]: search_term = _filter_text_for_index(search_term) escaped_words = [] - for word in _parse_words(search_term): + for index, word in enumerate(_parse_words(search_term)): + if index >= 10: + # We limit how many terms we include, as otherwise it can use + # excessive database time if people accidentally search for large + # strings. + break + # Postgres tsvector and tsquery quoting rules: # words potentially containing punctuation should be quoted # and then existing quotes and backslashes should be doubled diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index ea7d8199a7..5b594fe8dd 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py
@@ -20,7 +20,15 @@ # import logging -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union +from typing import ( + TYPE_CHECKING, + Dict, + List, + Mapping, + Optional, + Tuple, + Union, +) from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore @@ -112,8 +120,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore): Returns: Map from state_group to a StateMap at that point. """ - - state_filter = state_filter or StateFilter.all() + if state_filter is None: + state_filter = StateFilter.all() results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups} @@ -388,8 +396,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): return True, count txn.execute( - "SELECT state_group FROM state_group_edges" - " WHERE state_group = ?", + "SELECT state_group FROM state_group_edges WHERE state_group = ?", (state_group,), ) diff --git a/synapse/storage/databases/state/deletion.py b/synapse/storage/databases/state/deletion.py new file mode 100644
index 0000000000..f77c46f6ae --- /dev/null +++ b/synapse/storage/databases/state/deletion.py
@@ -0,0 +1,561 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + + +import contextlib +from typing import ( + TYPE_CHECKING, + AbstractSet, + AsyncIterator, + Collection, + Mapping, + Optional, + Set, + Tuple, +) + +from synapse.events import EventBase +from synapse.events.snapshot import EventContext +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, + make_in_list_sql_clause, +) +from synapse.storage.engines import PostgresEngine +from synapse.util.stringutils import shortstr + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class StateDeletionDataStore: + """Manages deletion of state groups in a safe manner. + + Deleting state groups is challenging as before we actually delete them we + need to ensure that there are no in-flight events that refer to the state + groups that we want to delete. + + To handle this, we take two approaches. First, before we persist any event + we ensure that the state group still exists and mark in the + `state_groups_persisting` table that the state group is about to be used. + (Note that we have to have the extra table here as state groups and events + can be in different databases, and thus we can't check for the existence of + state groups in the persist event transaction). Once the event has been + persisted, we can remove the row from `state_groups_persisting`. So long as + we check that table before deleting state groups, we can ensure that we + never persist events that reference deleted state groups, maintaining + database integrity. + + However, we want to avoid throwing exceptions so deep in the process of + persisting events. So instead of deleting state groups immediately, we mark + them as pending/proposed for deletion and wait for a certain amount of time + before performing the deletion. When we come to handle new events that + reference state groups, we check if they are pending deletion and bump the + time for when they'll be deleted (to give a chance for the event to be + persisted, or not). + + When deleting, we need to check that state groups remain unreferenced. There + is a race here where we a) fetch state groups that are ready for deletion, + b) check they're unreferenced, c) the state group becomes referenced but + then gets marked as pending deletion again, d) during the deletion + transaction we recheck `state_groups_pending_deletion` table again and see + that it exists and so continue with the deletion. To prevent this from + happening we add a `sequence_number` column to + `state_groups_pending_deletion`, and during deletion we ensure that for a + state group we're about to delete that the sequence number doesn't change + between steps (a) and (d). So long as we always bump the sequence number + whenever an event may become used the race can never happen. + """ + + # How long to wait before we delete state groups. This should be long enough + # for any in-flight events to be persisted. 
If events take longer to persist
+    # and any of the state groups they reference have been deleted, then the
+    # event will fail to persist (as well as any event in the same batch).
+    DELAY_BEFORE_DELETION_MS = 10 * 60 * 1000
+
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
+        self._clock = hs.get_clock()
+        self.db_pool = database
+        self._instance_name = hs.get_instance_name()
+
+        with db_conn.cursor(txn_name="_clear_existing_persisting") as txn:
+            self._clear_existing_persisting(txn)
+
+    def _clear_existing_persisting(self, txn: LoggingTransaction) -> None:
+        """On startup we clear any entries in `state_groups_persisting` that
+        match our instance name, in case of a previous unclean shutdown"""
+
+        self.db_pool.simple_delete_txn(
+            txn,
+            table="state_groups_persisting",
+            keyvalues={"instance_name": self._instance_name},
+        )
+
+    async def check_state_groups_and_bump_deletion(
+        self, state_groups: AbstractSet[int]
+    ) -> Collection[int]:
+        """Checks to make sure that the state groups haven't been deleted, and
+        if they're pending deletion we delay it (allowing time for any event
+        that will use them to finish persisting).
+
+        Returns:
+            The state groups that are missing, if any.
+        """
+
+        return await self.db_pool.runInteraction(
+            "check_state_groups_and_bump_deletion",
+            self._check_state_groups_and_bump_deletion_txn,
+            state_groups,
+            # We don't need to lock if we're just doing a quick check, as the
+            # lock doesn't prevent any races here.
+            lock=False,
+        )
+
+    def _check_state_groups_and_bump_deletion_txn(
+        self, txn: LoggingTransaction, state_groups: AbstractSet[int], lock: bool = True
+    ) -> Collection[int]:
+        """Checks to make sure that the state groups haven't been deleted, and
+        if they're pending deletion we delay it (allowing time for any event
+        that will use them to finish persisting).
+
+        The `lock` flag controls whether we should lock the `state_group` rows
+        we're checking, which we should do when storing new groups.
+
+        Returns:
+            The state groups that are missing, if any.
+        """
+
+        existing_state_groups = self._get_existing_groups_with_lock(
+            txn, state_groups, lock=lock
+        )
+
+        self._bump_deletion_txn(txn, existing_state_groups)
+
+        missing_state_groups = state_groups - existing_state_groups
+        if missing_state_groups:
+            return missing_state_groups
+
+        return ()
+
+    def _bump_deletion_txn(
+        self, txn: LoggingTransaction, state_groups: Collection[int]
+    ) -> None:
+        """Update any pending deletions of the given state groups to note that
+        they may now be referenced."""
+
+        if not state_groups:
+            return
+
+        now = self._clock.time_msec()
+        if isinstance(self.db_pool.engine, PostgresEngine):
+            clause, args = make_in_list_sql_clause(
+                self.db_pool.engine, "state_group", state_groups
+            )
+            sql = f"""
+                UPDATE state_groups_pending_deletion
+                SET sequence_number = DEFAULT, insertion_ts = ?
+ WHERE {clause} + """ + args.insert(0, now) + txn.execute(sql, args) + else: + rows = self.db_pool.simple_select_many_txn( + txn, + table="state_groups_pending_deletion", + column="state_group", + iterable=state_groups, + keyvalues={}, + retcols=("state_group",), + ) + if not rows: + return + + state_groups_to_update = [state_group for (state_group,) in rows] + + self.db_pool.simple_delete_many_txn( + txn, + table="state_groups_pending_deletion", + column="state_group", + values=state_groups_to_update, + keyvalues={}, + ) + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_pending_deletion", + keys=("state_group", "insertion_ts"), + values=[(state_group, now) for state_group in state_groups_to_update], + ) + + def _get_existing_groups_with_lock( + self, txn: LoggingTransaction, state_groups: Collection[int], lock: bool = True + ) -> AbstractSet[int]: + """Return which of the given state groups are in the database, and locks + those rows with `KEY SHARE` to ensure they don't get concurrently + deleted (if `lock` is true).""" + clause, args = make_in_list_sql_clause(self.db_pool.engine, "id", state_groups) + + sql = f""" + SELECT id FROM state_groups + WHERE {clause} + """ + if lock and isinstance(self.db_pool.engine, PostgresEngine): + # On postgres we add a row level lock to the rows to ensure that we + # conflict with any concurrent DELETEs. `FOR KEY SHARE` lock will + # not conflict with other read + sql += """ + FOR KEY SHARE + """ + + txn.execute(sql, args) + return {state_group for (state_group,) in txn} + + @contextlib.asynccontextmanager + async def persisting_state_group_references( + self, event_and_contexts: Collection[Tuple[EventBase, EventContext]] + ) -> AsyncIterator[None]: + """Wraps the persistence of the given events and contexts, ensuring that + any state groups referenced still exist and that they don't get deleted + during this.""" + + referenced_state_groups: Set[int] = set() + for event, ctx in event_and_contexts: + if ctx.rejected or event.internal_metadata.is_outlier(): + continue + + assert ctx.state_group is not None + + referenced_state_groups.add(ctx.state_group) + + if ctx.state_group_before_event: + referenced_state_groups.add(ctx.state_group_before_event) + + if not referenced_state_groups: + # We don't reference any state groups, so nothing to do + yield + return + + await self.db_pool.runInteraction( + "mark_state_groups_as_persisting", + self._mark_state_groups_as_persisting_txn, + referenced_state_groups, + ) + + error = True + try: + yield None + error = False + finally: + await self.db_pool.runInteraction( + "finish_persisting", + self._finish_persisting_txn, + referenced_state_groups, + error=error, + ) + + def _mark_state_groups_as_persisting_txn( + self, txn: LoggingTransaction, state_groups: Set[int] + ) -> None: + """Marks the given state groups as being persisted.""" + + existing_state_groups = self._get_existing_groups_with_lock(txn, state_groups) + missing_state_groups = state_groups - existing_state_groups + if missing_state_groups: + raise Exception( + f"state groups have been deleted: {shortstr(missing_state_groups)}" + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_persisting", + keys=("state_group", "instance_name"), + values=[(state_group, self._instance_name) for state_group in state_groups], + ) + + def _finish_persisting_txn( + self, txn: LoggingTransaction, state_groups: Collection[int], error: bool + ) -> None: + """Mark the state groups as having finished persistence. 
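# A self-contained sketch of the pin-while-persisting pattern implemented by
# `persisting_state_group_references` above: mark groups as in use on entry;
# on exit either clear the marks (success) or leave them for a deletion-timer
# bump (failure). The names and in-memory set are illustrative only.
import asyncio
import contextlib
from typing import AsyncIterator, Set

PERSISTING: Set[int] = set()

@contextlib.asynccontextmanager
async def pin_state_groups(groups: Set[int]) -> AsyncIterator[None]:
    PERSISTING.update(groups)     # stands in for the state_groups_persisting rows
    error = True
    try:
        yield
        error = False
    finally:
        PERSISTING.difference_update(groups)
        if error:
            pass                  # the real store re-bumps pending deletions here

async def main() -> None:
    async with pin_state_groups({1, 2}):
        assert PERSISTING == {1, 2}   # groups stay pinned while events persist
    assert not PERSISTING

asyncio.run(main())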
+
+        If `error` is true then we assume the state groups were not persisted,
+        and so we do not clear them from the pending deletion table.
+        """
+        self.db_pool.simple_delete_many_txn(
+            txn,
+            table="state_groups_persisting",
+            column="state_group",
+            values=state_groups,
+            keyvalues={"instance_name": self._instance_name},
+        )
+
+        if error:
+            # The state groups may or may not have been persisted, so we need to
+            # bump the deletion to ensure we recheck if they have become
+            # referenced.
+            self._bump_deletion_txn(txn, state_groups)
+            return
+
+        self.db_pool.simple_delete_many_batch_txn(
+            txn,
+            table="state_groups_pending_deletion",
+            keys=("state_group",),
+            values=[(state_group,) for state_group in state_groups],
+        )
+
+    async def mark_state_groups_as_pending_deletion(
+        self, state_groups: Collection[int]
+    ) -> None:
+        """Mark the given state groups as pending deletion.
+
+        If any of the state groups are already pending deletion, then those records are
+        left as is.
+        """
+
+        await self.db_pool.runInteraction(
+            "mark_state_groups_as_pending_deletion",
+            self._mark_state_groups_as_pending_deletion_txn,
+            state_groups,
+        )
+
+    def _mark_state_groups_as_pending_deletion_txn(
+        self,
+        txn: LoggingTransaction,
+        state_groups: Collection[int],
+    ) -> None:
+        sql = """
+            INSERT INTO state_groups_pending_deletion (state_group, insertion_ts)
+            VALUES %s
+            ON CONFLICT (state_group)
+            DO NOTHING
+        """
+
+        now = self._clock.time_msec()
+        rows = [
+            (
+                state_group,
+                now,
+            )
+            for state_group in state_groups
+        ]
+        if isinstance(txn.database_engine, PostgresEngine):
+            txn.execute_values(sql % ("?",), rows, fetch=False)
+        else:
+            txn.execute_batch(sql % ("(?, ?)",), rows)
+
+    async def mark_state_groups_as_used(self, state_groups: Collection[int]) -> None:
+        """Mark the given state groups as now being referenced"""
+
+        await self.db_pool.simple_delete_many(
+            table="state_groups_pending_deletion",
+            column="state_group",
+            iterable=state_groups,
+            keyvalues={},
+            desc="mark_state_groups_as_used",
+        )
+
+    async def get_pending_deletions(
+        self, state_groups: Collection[int]
+    ) -> Mapping[int, int]:
+        """Get which state groups are pending deletion.
+
+        Returns:
+            a mapping from state groups that are pending deletion to their
+            sequence number
+        """
+
+        rows = await self.db_pool.simple_select_many_batch(
+            table="state_groups_pending_deletion",
+            column="state_group",
+            iterable=state_groups,
+            retcols=("state_group", "sequence_number"),
+            keyvalues={},
+            desc="get_pending_deletions",
+        )
+
+        return dict(rows)
+
+    def get_state_groups_ready_for_potential_deletion_txn(
+        self,
+        txn: LoggingTransaction,
+        state_groups_to_sequence_numbers: Mapping[int, int],
+    ) -> Collection[int]:
+        """Given a set of state groups, return which state groups can
+        potentially be deleted.
+
+        The state groups must have been checked to see if they remain
+        unreferenced before calling this function.
+
+        Note: This must be called within the same transaction that the state
+        groups are deleted.
+
+        Args:
+            state_groups_to_sequence_numbers: The state groups, and the sequence
+                numbers from before the state groups were checked to see if they
+                were unreferenced.
+
+        Returns:
+            The subset of state groups that can safely be deleted
+
+        """
+
+        if not state_groups_to_sequence_numbers:
+            return state_groups_to_sequence_numbers
+
+        if isinstance(self.db_pool.engine, PostgresEngine):
+            # On postgres we want to lock the rows FOR UPDATE as early as
+            # possible to help avoid conflicts.
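# A minimal sketch (illustrative names, not the real schema) of the
# sequence-number check described in the class docstring: deletion proceeds
# only if the sequence number observed when the group was found unreferenced
# is still the stored one; anything that may reference the group again bumps
# the number, which makes a concurrent deletion abort.
from typing import Dict

pending: Dict[int, int] = {42: 1}        # state_group -> sequence_number

def bump(state_group: int) -> None:
    # Called whenever an in-flight event might use the group again.
    pending[state_group] += 1

def try_delete(state_group: int, seen_sequence_number: int) -> bool:
    if pending.get(state_group) != seen_sequence_number:
        return False                     # raced with a bump: keep the group
    del pending[state_group]
    return True

seen = pending[42]
bump(42)                                 # a new event referenced the group
assert try_delete(42, seen) is False     # the stale deletion safely aborts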
+            clause, args = make_in_list_sql_clause(
+                self.db_pool.engine, "id", state_groups_to_sequence_numbers
+            )
+            sql = f"""
+                SELECT id FROM state_groups
+                WHERE {clause}
+                FOR UPDATE
+            """
+            txn.execute(sql, args)
+
+        # Check the deletion status in the DB of the given state groups
+        clause, args = make_in_list_sql_clause(
+            self.db_pool.engine,
+            column="state_group",
+            iterable=state_groups_to_sequence_numbers,
+        )
+
+        sql = f"""
+            SELECT state_group, insertion_ts, sequence_number FROM (
+                SELECT state_group, insertion_ts, sequence_number FROM state_groups_pending_deletion
+                UNION
+                SELECT state_group, null, null FROM state_groups_persisting
+            ) AS s
+            WHERE {clause}
+        """
+
+        txn.execute(sql, args)
+
+        # The above query will return potentially two rows per state group (one
+        # for each table), so we track which state groups have enough time
+        # elapsed and which are not ready to be deleted.
+        ready_to_be_deleted = set()
+        not_ready_to_be_deleted = set()
+
+        now = self._clock.time_msec()
+        for state_group, insertion_ts, sequence_number in txn:
+            if insertion_ts is None:
+                # A null insertion_ts means that we are currently persisting
+                # events that reference the state group, so we don't delete
+                # them.
+                not_ready_to_be_deleted.add(state_group)
+                continue
+
+            # We know this can't be None if insertion_ts is not None
+            assert sequence_number is not None
+
+            # Check if the sequence number has changed, if it has then it
+            # indicates that the state group may have become referenced since we
+            # checked.
+            if state_groups_to_sequence_numbers[state_group] != sequence_number:
+                not_ready_to_be_deleted.add(state_group)
+                continue
+
+            if now - insertion_ts < self.DELAY_BEFORE_DELETION_MS:
+                # Not enough time has elapsed to allow us to delete.
+                not_ready_to_be_deleted.add(state_group)
+                continue
+
+            ready_to_be_deleted.add(state_group)
+
+        can_be_deleted = ready_to_be_deleted - not_ready_to_be_deleted
+        if not_ready_to_be_deleted:
+            # If there are any state groups that aren't ready to be deleted,
+            # then we also need to remove any state groups that are referenced
+            # by them.
+            clause, args = make_in_list_sql_clause(
+                self.db_pool.engine,
+                column="state_group",
+                iterable=state_groups_to_sequence_numbers,
+            )
+            sql = f"""
+                WITH RECURSIVE ancestors(state_group) AS (
+                    SELECT DISTINCT prev_state_group
+                    FROM state_group_edges WHERE {clause}
+                    UNION
+                    SELECT prev_state_group
+                    FROM state_group_edges
+                    INNER JOIN ancestors USING (state_group)
+                )
+                SELECT state_group FROM ancestors
+            """
+            txn.execute(sql, args)
+
+            can_be_deleted.difference_update(state_group for (state_group,) in txn)
+
+        return can_be_deleted
+
+    async def get_next_state_group_collection_to_delete(
+        self,
+    ) -> Optional[Tuple[str, Mapping[int, int]]]:
+        """Get the next set of state groups to try and delete
+
+        Returns:
+            2-tuple of room_id and mapping of state groups to sequence number.
+        """
+        return await self.db_pool.runInteraction(
+            "get_next_state_group_collection_to_delete",
+            self._get_next_state_group_collection_to_delete_txn,
+        )
+
+    def _get_next_state_group_collection_to_delete_txn(
+        self,
+        txn: LoggingTransaction,
+    ) -> Optional[Tuple[str, Mapping[int, int]]]:
+        """Implementation of `get_next_state_group_collection_to_delete`"""
+
+        # We want to return chunks of state groups that were marked for deletion
+        # at the same time (this isn't necessary, just more efficient). We do
+        # this by looking for the oldest insertion_ts, and then pulling out all
+        # rows that have the same insertion_ts (and room ID).
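# A hedged sketch of the chunk-selection idea described just above, assuming
# rows marked at the same time share an insertion_ts: find the oldest eligible
# timestamp, then return every pending row with that room_id and insertion_ts.
# Names are illustrative, not the real tables.
from typing import Dict, List, Optional, Tuple

Row = Tuple[str, int, int, int]  # (room_id, insertion_ts, state_group, sequence_number)

def next_collection(rows: List[Row], cutoff: int) -> Optional[Tuple[str, Dict[int, int]]]:
    eligible = [r for r in rows if r[1] < cutoff]
    if not eligible:
        return None
    room_id, ts, _, _ = min(eligible, key=lambda r: r[1])
    return room_id, {sg: seq for rid, its, sg, seq in rows if rid == room_id and its == ts}

rows = [("!r:x", 5, 1, 7), ("!r:x", 5, 2, 9), ("!r:x", 8, 3, 1)]
assert next_collection(rows, cutoff=6) == ("!r:x", {1: 7, 2: 9})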
+ now = self._clock.time_msec() + + sql = """ + SELECT room_id, insertion_ts + FROM state_groups_pending_deletion AS sd + INNER JOIN state_groups AS sg ON (id = sd.state_group) + LEFT JOIN state_groups_persisting AS sp USING (state_group) + WHERE insertion_ts < ? AND sp.state_group IS NULL + ORDER BY insertion_ts + LIMIT 1 + """ + txn.execute(sql, (now - self.DELAY_BEFORE_DELETION_MS,)) + row = txn.fetchone() + if not row: + return None + + (room_id, insertion_ts) = row + + sql = """ + SELECT state_group, sequence_number + FROM state_groups_pending_deletion AS sd + INNER JOIN state_groups AS sg ON (id = sd.state_group) + LEFT JOIN state_groups_persisting AS sp USING (state_group) + WHERE room_id = ? AND insertion_ts = ? AND sp.state_group IS NULL + ORDER BY insertion_ts + """ + txn.execute(sql, (room_id, insertion_ts)) + + return room_id, dict(txn) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py
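The deletion store above implements a two-phase handshake: a state group is first marked as pending deletion with a timestamp and a sequence number, anything that might re-reference the group bumps its sequence number, and a deleter only removes groups whose recorded sequence number is unchanged and whose delay has elapsed. A minimal in-memory sketch of that protocol, for illustration only (the class, its methods, and the DELAY_MS constant below are not Synapse APIs):

    import time
    from typing import Dict, Iterable, Mapping, Set, Tuple

    DELAY_MS = 10 * 60 * 1000  # stand-in for the store's DELAY_BEFORE_DELETION_MS

    class PendingDeletionModel:
        """Tiny in-memory model of the pending-deletion handshake."""

        def __init__(self) -> None:
            self._seq = 0
            # state_group -> (insertion_ts_ms, sequence_number)
            self._pending: Dict[int, Tuple[int, int]] = {}

        def mark_pending(self, state_groups: Iterable[int]) -> None:
            # Mirrors INSERT ... ON CONFLICT DO NOTHING: rows that already
            # exist keep their original timestamp and sequence number.
            now = int(time.time() * 1000)
            for sg in state_groups:
                if sg not in self._pending:
                    self._seq += 1
                    self._pending[sg] = (now, self._seq)

        def bump(self, state_groups: Iterable[int]) -> None:
            # Called when a group may have become referenced again; this
            # invalidates any snapshot taken before now.
            for sg in state_groups:
                row = self._pending.get(sg)
                if row is not None:
                    self._seq += 1
                    self._pending[sg] = (row[0], self._seq)

        def snapshot(self, state_groups: Iterable[int]) -> Mapping[int, int]:
            # What a deleter records before re-checking for references.
            return {
                sg: self._pending[sg][1]
                for sg in state_groups
                if sg in self._pending
            }

        def ready_for_deletion(self, snapshot: Mapping[int, int]) -> Set[int]:
            now = int(time.time() * 1000)
            ready = set()
            for sg, seen_seq in snapshot.items():
                row = self._pending.get(sg)
                if row is None:
                    continue  # no longer pending, i.e. marked as used
                insertion_ts, current_seq = row
                if current_seq != seen_seq:
                    continue  # bumped since the snapshot, may be referenced
                if now - insertion_ts < DELAY_MS:
                    continue  # not enough time has elapsed yet
                ready.add(sg)
            return ready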
index d4ac74c1ee..c1a66dcba0 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py
@@ -22,10 +22,10 @@ import logging from typing import ( TYPE_CHECKING, - Collection, Dict, Iterable, List, + Mapping, Optional, Set, Tuple, @@ -36,7 +36,10 @@ import attr from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase +from synapse.events.snapshot import ( + UnpersistedEventContext, + UnpersistedEventContextBase, +) from synapse.logging.opentracing import tag_args, trace from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -45,6 +48,7 @@ from synapse.storage.database import ( LoggingTransaction, ) from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStore +from synapse.storage.engines import PostgresEngine from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator from synapse.types import MutableStateMap, StateKey, StateMap @@ -55,6 +59,7 @@ from synapse.util.cancellation import cancellable if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.state.deletion import StateDeletionDataStore logger = logging.getLogger(__name__) @@ -83,8 +88,10 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): database: DatabasePool, db_conn: LoggingDatabaseConnection, hs: "HomeServer", + state_deletion_store: "StateDeletionDataStore", ): super().__init__(database, db_conn, hs) + self._state_deletion_store = state_deletion_store # Originally the state store used a single DictionaryCache to cache the # event IDs for the state types in a given state group to avoid hammering @@ -284,7 +291,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): Returns: Dict of state group to state map. """ - state_filter = state_filter or StateFilter.all() + if state_filter is None: + state_filter = StateFilter.all() member_filter, non_member_filter = state_filter.get_member_split() @@ -466,14 +474,15 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): Returns: A list of state groups """ - is_in_db = self.db_pool.simple_select_one_onecol_txn( - txn, - table="state_groups", - keyvalues={"id": prev_group}, - retcol="id", - allow_none=True, + + # We need to check that the prev group isn't about to be deleted + is_missing = ( + self._state_deletion_store._check_state_groups_and_bump_deletion_txn( + txn, + {prev_group}, + ) ) - if not is_in_db: + if is_missing: raise Exception( "Trying to persist state with unpersisted prev_group: %r" % (prev_group,) @@ -545,6 +554,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): for key, state_id in context.state_delta_due_to_event.items() ], ) + return events_and_context return await self.db_pool.runInteraction( @@ -600,14 +610,15 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): The state group if successfully created, or None if the state needs to be persisted as a full state. 
""" - is_in_db = self.db_pool.simple_select_one_onecol_txn( - txn, - table="state_groups", - keyvalues={"id": prev_group}, - retcol="id", - allow_none=True, + + # We need to check that the prev group isn't about to be deleted + is_missing = ( + self._state_deletion_store._check_state_groups_and_bump_deletion_txn( + txn, + {prev_group}, + ) ) - if not is_in_db: + if is_missing: raise Exception( "Trying to persist state with unpersisted prev_group: %r" % (prev_group,) @@ -725,8 +736,10 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): ) async def purge_unreferenced_state_groups( - self, room_id: str, state_groups_to_delete: Collection[int] - ) -> None: + self, + room_id: str, + state_groups_to_sequence_numbers: Mapping[int, int], + ) -> bool: """Deletes no longer referenced state groups and de-deltas any state groups that reference them. @@ -734,21 +747,31 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): room_id: The room the state groups belong to (must all be in the same room). state_groups_to_delete: Set of all state groups to delete. + + Returns: + Whether any state groups were actually deleted. """ - await self.db_pool.runInteraction( + return await self.db_pool.runInteraction( "purge_unreferenced_state_groups", self._purge_unreferenced_state_groups, room_id, - state_groups_to_delete, + state_groups_to_sequence_numbers, ) def _purge_unreferenced_state_groups( self, txn: LoggingTransaction, room_id: str, - state_groups_to_delete: Collection[int], - ) -> None: + state_groups_to_sequence_numbers: Mapping[int, int], + ) -> bool: + state_groups_to_delete = self._state_deletion_store.get_state_groups_ready_for_potential_deletion_txn( + txn, state_groups_to_sequence_numbers + ) + + if not state_groups_to_delete: + return False + logger.info( "[purge] found %i state groups to delete", len(state_groups_to_delete) ) @@ -767,7 +790,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): remaining_state_groups = { state_group - for state_group, in rows + for (state_group,) in rows if state_group not in state_groups_to_delete } @@ -804,13 +827,23 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): logger.info("[purge] removing redundant state groups") txn.execute_batch( "DELETE FROM state_groups_state WHERE state_group = ?", - ((sg,) for sg in state_groups_to_delete), + [(sg,) for sg in state_groups_to_delete], + ) + txn.execute_batch( + "DELETE FROM state_group_edges WHERE state_group = ?", + [(sg,) for sg in state_groups_to_delete], ) txn.execute_batch( "DELETE FROM state_groups WHERE id = ?", - ((sg,) for sg in state_groups_to_delete), + [(sg,) for sg in state_groups_to_delete], + ) + txn.execute_batch( + "DELETE FROM state_groups_pending_deletion WHERE state_group = ?", + [(sg,) for sg in state_groups_to_delete], ) + return True + @trace @tag_args async def get_previous_state_groups( @@ -829,7 +862,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): List[Tuple[int, int]], await self.db_pool.simple_select_many_batch( table="state_group_edges", - column="prev_state_group", + column="state_group", iterable=state_groups, keyvalues={}, retcols=("state_group", "prev_state_group"), @@ -839,60 +872,77 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): return dict(rows) - async def purge_room_state( - self, room_id: str, state_groups_to_delete: Collection[int] - ) -> None: - """Deletes all record of a room from state tables + @trace + @tag_args + async def get_next_state_groups( + 
self, state_groups: Iterable[int] + ) -> Dict[int, int]: + """Fetch the groups that have the given state groups as their previous + state groups. Args: - room_id: - state_groups_to_delete: State groups to delete + state_groups + + Returns: + A mapping from state group to previous state group. """ - logger.info("[purge] Starting state purge") - await self.db_pool.runInteraction( + rows = cast( + List[Tuple[int, int]], + await self.db_pool.simple_select_many_batch( + table="state_group_edges", + column="prev_state_group", + iterable=state_groups, + keyvalues={}, + retcols=("state_group", "prev_state_group"), + desc="get_next_state_groups", + ), + ) + + return dict(rows) + + async def purge_room_state(self, room_id: str) -> None: + return await self.db_pool.runInteraction( "purge_room_state", self._purge_room_state_txn, room_id, - state_groups_to_delete, ) - logger.info("[purge] Done with state purge") def _purge_room_state_txn( self, txn: LoggingTransaction, room_id: str, - state_groups_to_delete: Collection[int], ) -> None: - # first we have to delete the state groups states - logger.info("[purge] removing %s from state_groups_state", room_id) + # Delete all edges that reference a state group linked to room_id + logger.info("[purge] removing %s from state_group_edges", room_id) - self.db_pool.simple_delete_many_txn( - txn, - table="state_groups_state", - column="state_group", - values=state_groups_to_delete, - keyvalues={}, - ) + if isinstance(self.database_engine, PostgresEngine): + # Disable statement timeouts for this transaction; purging rooms can + # take a while! + txn.execute("SET LOCAL statement_timeout = 0") - # ... and the state group edges - logger.info("[purge] removing %s from state_group_edges", room_id) + txn.execute( + """ + DELETE FROM state_group_edges AS sge WHERE sge.state_group IN ( + SELECT id FROM state_groups AS sg WHERE sg.room_id = ? + )""", + (room_id,), + ) - self.db_pool.simple_delete_many_txn( - txn, - table="state_group_edges", - column="state_group", - values=state_groups_to_delete, - keyvalues={}, + # state_groups_state table has a room_id column but no index on it, unlike state_groups, + # so we delete them by matching the room_id through the state_groups table. + logger.info("[purge] removing %s from state_groups_state", room_id) + txn.execute( + """ + DELETE FROM state_groups_state AS sgs WHERE sgs.state_group IN ( + SELECT id FROM state_groups AS sg WHERE sg.room_id = ? + )""", + (room_id,), ) - # ... and the state groups logger.info("[purge] removing %s from state_groups", room_id) - - self.db_pool.simple_delete_many_txn( + self.db_pool.simple_delete_txn( txn, table="state_groups", - column="id", - values=state_groups_to_delete, - keyvalues={}, + keyvalues={"room_id": room_id}, ) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
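Putting the two stores together, a caller could drive deletion with a loop like the following. This is a hypothetical sketch rather than Synapse's actual purge scheduler; `state_store` and `deletion_store` stand for the two classes changed above.

    # Hypothetical driver loop: fetch the next batch of groups marked for
    # deletion, then let purge_unreferenced_state_groups re-check the
    # sequence numbers inside its own transaction before anything is dropped.
    async def delete_pending_state_groups(state_store, deletion_store) -> None:
        while True:
            next_batch = (
                await deletion_store.get_next_state_group_collection_to_delete()
            )
            if next_batch is None:
                return  # nothing has been pending for long enough yet

            room_id, groups_to_sequence_numbers = next_batch
            deleted = await state_store.purge_unreferenced_state_groups(
                room_id, groups_to_sequence_numbers
            )
            if not deleted:
                # Every group in the batch was bumped or re-referenced since
                # it was marked; retrying the same batch now would just spin.
                return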
index ad222e7e2d..9d82c59384 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py
@@ -28,6 +28,11 @@ if TYPE_CHECKING: from synapse.storage.database import LoggingDatabaseConnection +# A string that will be replaced with the appropriate auto increment directive +# for the database engine; it expands to an auto incrementing integer primary key. +AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$" + + class IsolationLevel(IntEnum): READ_COMMITTED: int = 1 REPEATABLE_READ: int = 2 diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 90641d5a18..e4cd359201 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py
@@ -25,6 +25,7 @@ from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast import psycopg2.extensions from synapse.storage.engines._base import ( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, BaseDatabaseEngine, IncorrectDatabaseSetup, IsolationLevel, @@ -98,8 +99,8 @@ class PostgresEngine( allow_unsafe_locale = self.config.get("allow_unsafe_locale", False) # Are we on a supported PostgreSQL version? - if not allow_outdated_version and self._version < 110000: - raise RuntimeError("Synapse requires PostgreSQL 11 or above.") + if not allow_outdated_version and self._version < 130000: + raise RuntimeError("Synapse requires PostgreSQL 13 or above.") with db_conn.cursor() as txn: txn.execute("SHOW SERVER_ENCODING") @@ -256,4 +257,10 @@ class PostgresEngine( executing the script in its own transaction. The script transaction is left open and it is the responsibility of the caller to commit it. """ + # Replace auto increment placeholder with the appropriate directive + script = script.replace( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, + "BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY", + ) + cursor.execute(f"COMMIT; BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index b11094c5c1..9d1795ebe5 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py
@@ -25,6 +25,7 @@ import threading from typing import TYPE_CHECKING, Any, List, Mapping, Optional from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.engines._base import AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER from synapse.storage.types import Cursor if TYPE_CHECKING: @@ -168,6 +169,11 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection, sqlite3.Cursor]): > first. No other implicit transaction control is performed; any transaction > control must be added to sql_script. """ + # Replace auto increment placeholder with the appropriate directive + script = script.replace( + AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER, "INTEGER PRIMARY KEY AUTOINCREMENT" + ) + # The implementation of `executescript` can be found at # https://github.com/python/cpython/blob/3.11/Modules/_sqlite/cursor.c#L1035. cursor.executescript(f"BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/invite_rule.py b/synapse/storage/invite_rule.py new file mode 100644
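To see what the new placeholder buys, here is a sketch of the same substitution applied to a hypothetical delta (`example_table` is invented): the schema is written once with the marker, and each engine rewrites it to its own auto-increment syntax before executing the script.

    AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER = "$%AUTO_INCREMENT_PRIMARY_KEY%$"

    # A delta written once, engine-agnostically.
    SCHEMA = f"""
    CREATE TABLE example_table(
        id {AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER},
        name TEXT NOT NULL
    );
    """

    # What PostgresEngine.executescript substitutes:
    postgres_sql = SCHEMA.replace(
        AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER,
        "BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY",
    )

    # What Sqlite3Engine.executescript substitutes:
    sqlite_sql = SCHEMA.replace(
        AUTO_INCREMENT_PRIMARY_KEYPLACEHOLDER,
        "INTEGER PRIMARY KEY AUTOINCREMENT",
    )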
index 0000000000..b9d9d1eb62 --- /dev/null +++ b/synapse/storage/invite_rule.py
@@ -0,0 +1,110 @@ +import logging +from enum import Enum +from typing import Optional, Pattern + +from matrix_common.regex import glob_to_regex + +from synapse.types import JsonMapping, UserID + +logger = logging.getLogger(__name__) + + +class InviteRule(Enum): + """Enum to define the action taken when an invite matches a rule.""" + + ALLOW = "allow" + BLOCK = "block" + IGNORE = "ignore" + + +class InviteRulesConfig: + """Class to determine if a given user permits an invite from another user, and the action to take.""" + + def __init__(self, account_data: Optional[JsonMapping]): + self.allowed_users: list[Pattern[str]] = [] + self.ignored_users: list[Pattern[str]] = [] + self.blocked_users: list[Pattern[str]] = [] + + self.allowed_servers: list[Pattern[str]] = [] + self.ignored_servers: list[Pattern[str]] = [] + self.blocked_servers: list[Pattern[str]] = [] + + def process_field( + values: Optional[list[str]], + ruleset: list[Pattern[str]], + rule: InviteRule, + ) -> None: + if isinstance(values, list): + for value in values: + if isinstance(value, str) and len(value) > 0: + # User IDs cannot exceed 255 bytes. Don't process large, potentially + # expensive glob patterns. + if len(value) > 255: + logger.debug( + f"Ignoring invite config glob pattern that is >255 bytes: {value}" + ) + continue + + try: + ruleset.append(glob_to_regex(value)) + except Exception as e: + # If for whatever reason we can't process this, just ignore it. + logger.debug( + f"Could not process '{value}' field of invite rule config, ignoring: {e}" + ) + + if account_data: + process_field( + account_data.get("allowed_users"), self.allowed_users, InviteRule.ALLOW + ) + process_field( + account_data.get("ignored_users"), self.ignored_users, InviteRule.IGNORE + ) + process_field( + account_data.get("blocked_users"), self.blocked_users, InviteRule.BLOCK + ) + process_field( + account_data.get("allowed_servers"), + self.allowed_servers, + InviteRule.ALLOW, + ) + process_field( + account_data.get("ignored_servers"), + self.ignored_servers, + InviteRule.IGNORE, + ) + process_field( + account_data.get("blocked_servers"), + self.blocked_servers, + InviteRule.BLOCK, + ) + + def get_invite_rule(self, user_id: str) -> InviteRule: + """Get the invite rule that matches this user. Will return InviteRule.ALLOW if no rules match. + + Args: + user_id: The user ID of the inviting user. + + """ + user = UserID.from_string(user_id) + # The order here is important. We always process user rules before server rules, + # and we always process in the order of Allow, Ignore, Block. + for patterns, rule in [ + (self.allowed_users, InviteRule.ALLOW), + (self.ignored_users, InviteRule.IGNORE), + (self.blocked_users, InviteRule.BLOCK), + ]: + for regex in patterns: + if regex.match(user_id): + return rule + + for patterns, rule in [ + (self.allowed_servers, InviteRule.ALLOW), + (self.ignored_servers, InviteRule.IGNORE), + (self.blocked_servers, InviteRule.BLOCK), + ]: + for regex in patterns: + if regex.match(user.domain): + return rule + + return InviteRule.ALLOW diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
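A short usage sketch for the new class (the account data values here are invented): rules are glob patterns, user rules are checked before server rules, and within each tier the order is allow, then ignore, then block.

    config = InviteRulesConfig(
        {
            "allowed_users": ["@friend:example.com"],
            "ignored_users": ["@*:example.com"],
            "blocked_servers": ["evil.example"],
        }
    )

    # The allow rule wins even though the ignore glob also matches.
    assert config.get_invite_rule("@friend:example.com") == InviteRule.ALLOW
    # Everyone else on example.com is ignored.
    assert config.get_invite_rule("@stranger:example.com") == InviteRule.IGNORE
    # Server rules apply when no user rule matches.
    assert config.get_invite_rule("@anyone:evil.example") == InviteRule.BLOCK
    # Nothing matches, so the default is to allow.
    assert config.get_invite_rule("@other:elsewhere.example") == InviteRule.ALLOW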
index aaffe5ecc9..bf087702ea 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py
@@ -607,7 +607,7 @@ def _apply_module_schema_files( "SELECT file FROM applied_module_schemas WHERE module_name = ?", (modname,), ) - applied_deltas = {d for d, in cur} + applied_deltas = {d for (d,) in cur} for name, stream in names_and_streams: if name in applied_deltas: continue @@ -710,7 +710,7 @@ def _get_or_create_schema_state( "SELECT file FROM applied_schema_deltas WHERE version >= ?", (current_version,), ) - applied_deltas = tuple(d for d, in txn) + applied_deltas = tuple(d for (d,) in txn) return _SchemaState( current_version=current_version, diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 80c9630867..9dc6c395e8 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py
@@ -40,6 +40,34 @@ class RoomsForUser: @attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) +class RoomsForUserSlidingSync: + room_id: str + sender: Optional[str] + membership: str + event_id: Optional[str] + event_pos: PersistedEventPosition + room_version_id: str + + has_known_state: bool + room_type: Optional[str] + is_encrypted: bool + + +@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) +class RoomsForUserStateReset: + """A version of `RoomsForUser` that supports optional sender and event ID + fields, to handle state resets. State resets can affect room membership + without a corresponding event, so that information isn't always available.""" + + room_id: str + sender: Optional[str] + membership: str + event_id: Optional[str] + event_pos: PersistedEventPosition + room_version_id: str + + +@attr.s(slots=True, frozen=True, weakref_slot=False, auto_attribs=True) class GetRoomsForUserWithStreamOrdering: room_id: str event_pos: PersistedEventPosition diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
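An illustrative instantiation showing why those fields are optional: after a state reset there is no membership event to point at, so both `sender` and `event_id` are `None` (all values here are made up).

    from synapse.types import PersistedEventPosition

    reset_membership = RoomsForUserStateReset(
        room_id="!room:example.com",
        sender=None,  # no event, so no sender is known
        membership="leave",
        event_id=None,  # the membership changed without an event
        event_pos=PersistedEventPosition(instance_name="master", stream=1234),
        room_version_id="10",
    )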
index 581d00346b..3c3b13437e 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -19,7 +19,7 @@ # # -SCHEMA_VERSION = 86 # remember to update the list below when updating +SCHEMA_VERSION = 92 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -142,6 +142,32 @@ Changes in SCHEMA_VERSION = 85 Changes in SCHEMA_VERSION = 86 - Add a column `authenticated` to the tables `local_media_repository` and `remote_media_cache` + +Changes in SCHEMA_VERSION = 87 + - Add tables to store Sliding Sync data for quick filtering/sorting + (`sliding_sync_joined_rooms`, `sliding_sync_membership_snapshots`) + - Add tables for storing the per-connection state for sliding sync requests: + sliding_sync_connections, sliding_sync_connection_positions, sliding_sync_connection_required_state, + sliding_sync_connection_room_configs, sliding_sync_connection_streams + +Changes in SCHEMA_VERSION = 88 + - MSC4140: Add `delayed_events` table that keeps track of events that are to + be posted in response to a resettable timeout or an on-demand action. + - Add background update to fix data integrity issue in the + `sliding_sync_membership_snapshots` -> `forgotten` column + +Changes in SCHEMA_VERSION = 89 + - Add `state_groups_pending_deletion` and `state_groups_persisting` tables. + +Changes in SCHEMA_VERSION = 90 + - Add a column `participant` to `room_memberships` table + - Add background update to delete unreferenced state groups. + +Changes in SCHEMA_VERSION = 91 + - Add a `sha256` column to the `local_media_repository` and `remote_media_cache` tables. + +Changes in SCHEMA_VERSION = 92 + - Cleaned up a trigger that was added in #18260 and then reverted. """ diff --git a/synapse/storage/schema/main/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py
index b050cc16a7..c01c1325cb 100644 --- a/synapse/storage/schema/main/delta/25/fts.py +++ b/synapse/storage/schema/main/delta/25/fts.py
@@ -75,8 +75,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> progress_json = json.dumps(progress) sql = ( - "INSERT into background_updates (update_name, progress_json)" - " VALUES (?, ?)" + "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)" ) cur.execute(sql, ("event_search", progress_json)) diff --git a/synapse/storage/schema/main/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py
index d7f360b6e6..e6e73e1b77 100644 --- a/synapse/storage/schema/main/delta/27/ts.py +++ b/synapse/storage/schema/main/delta/27/ts.py
@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> progress_json = json.dumps(progress) sql = ( - "INSERT into background_updates (update_name, progress_json)" - " VALUES (?, ?)" + "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)" ) cur.execute(sql, ("event_origin_server_ts", progress_json)) diff --git a/synapse/storage/schema/main/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py
index 0e65c9a841..46355122bb 100644 --- a/synapse/storage/schema/main/delta/31/search_update.py +++ b/synapse/storage/schema/main/delta/31/search_update.py
@@ -59,8 +59,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> progress_json = json.dumps(progress) sql = ( - "INSERT into background_updates (update_name, progress_json)" - " VALUES (?, ?)" + "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)" ) cur.execute(sql, ("event_search_order", progress_json)) diff --git a/synapse/storage/schema/main/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py
index 9c02aeda88..53d215337e 100644 --- a/synapse/storage/schema/main/delta/33/event_fields.py +++ b/synapse/storage/schema/main/delta/33/event_fields.py
@@ -55,8 +55,7 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> progress_json = json.dumps(progress) sql = ( - "INSERT into background_updates (update_name, progress_json)" - " VALUES (?, ?)" + "INSERT into background_updates (update_name, progress_json) VALUES (?, ?)" ) cur.execute(sql, ("event_fields_sender_url", progress_json)) diff --git a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
index 2461f87d77..b7535dae14 100644 --- a/synapse/storage/schema/main/delta/56/unique_user_filter_index.py +++ b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
@@ -41,8 +41,6 @@ def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> (user_id, filter_id); DROP TABLE user_filters; ALTER TABLE user_filters_migration RENAME TO user_filters; - """ % ( - select_clause, - ) + """ % (select_clause,) execute_statements_from_stream(cur, StringIO(sql)) diff --git a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
index 5d3578eaf4..a847ef4147 100644 --- a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py +++ b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
@@ -23,6 +23,7 @@ This migration handles the process of changing the type of `room_depth.min_depth` to a BIGINT. """ + from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine diff --git a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
index b4d4b6536b..9ac3d1d31f 100644 --- a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py +++ b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py
@@ -25,6 +25,7 @@ This migration adds triggers to the partial_state_events tables to enforce uniqu Triggers cannot be expressed in .sql files, so we have to use a separate file. """ + from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine diff --git a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
index 93543fca7c..be80a6747d 100644 --- a/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py +++ b/synapse/storage/schema/main/delta/72/07force_update_current_state_events_membership.py
@@ -26,6 +26,7 @@ for its completion can be removed. Note the background job must still remain defined in the database class. """ + from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine diff --git a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
index 6609ef0dac..a847a93494 100644 --- a/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py +++ b/synapse/storage/schema/main/delta/74/04_membership_tables_event_stream_ordering_triggers.py
@@ -24,6 +24,7 @@ This migration adds triggers to the room membership tables to enforce consistency. Triggers cannot be expressed in .sql files, so we have to use a separate file. """ + from synapse.storage.database import LoggingTransaction from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine diff --git a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
index ad9c394162..1c823a3aa1 100644 --- a/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py +++ b/synapse/storage/schema/main/delta/78/03event_extremities_constraints.py
@@ -23,6 +23,7 @@ """ This migration adds foreign key constraint to `event_forward_extremities` table. """ + from synapse.storage.background_updates import ( ForeignKeyConstraint, run_validate_constraint_and_delete_rows_schema_delta, diff --git a/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql b/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql new file mode 100644
index 0000000000..e6db91e5b5 --- /dev/null +++ b/synapse/storage/schema/main/delta/86/02_receipts_event_id_index.sql
@@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8602, 'receipts_room_id_event_id_index', '{}'); diff --git a/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql b/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql new file mode 100644
index 0000000000..2f71e541f8 --- /dev/null +++ b/synapse/storage/schema/main/delta/87/01_sliding_sync_memberships.sql
@@ -0,0 +1,169 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- This table is a list/queue used to keep track of which rooms need to be inserted into +-- `sliding_sync_joined_rooms`. We do this to avoid reading from `current_state_events` +-- during the background update to populate `sliding_sync_joined_rooms`, which works, but +-- it takes a lot of work for the database to grab `DISTINCT` room_ids given how many +-- state events there are for each room. +-- +-- This table is prefilled with every room in the `rooms` table (see the +-- `sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update` background +-- update). This table is also updated whenever we come across stale data so that we can +-- catch up with all of the new data if Synapse was downgraded (see +-- `_resolve_stale_data_in_sliding_sync_tables`). +-- +-- FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +-- foreground update for +-- `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +-- https://github.com/element-hq/synapse/issues/17623) +CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms_to_recalculate( + room_id TEXT NOT NULL REFERENCES rooms(room_id), + PRIMARY KEY (room_id) +); + +-- A table for storing meta data (current state relevant to sliding sync) for rooms that the +-- local server is still participating in (someone local is joined to the room). +-- +-- We store the joined rooms in a separate table from `sliding_sync_membership_snapshots` +-- because we need up-to-date information for joined rooms and it can be shared across +-- everyone who is joined. +-- +-- This table is kept in sync with `current_state_events`, which means that if the server is +-- no longer participating in a room, the row will be deleted. +CREATE TABLE IF NOT EXISTS sliding_sync_joined_rooms( + room_id TEXT NOT NULL REFERENCES rooms(room_id), + -- The `stream_ordering` of the most-recent/latest event in the room + event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering), + -- The `stream_ordering` of the last event according to the `bump_event_types` + bump_stamp BIGINT, + -- `m.room.create` -> `content.type` (current state) + -- + -- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API + room_type TEXT, + -- `m.room.name` -> `content.name` (current state) + -- + -- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API + room_name TEXT, + -- `m.room.encryption` -> `content.algorithm` (current state) + -- + -- Useful for the `is_encrypted` filter in the Sliding Sync API + is_encrypted BOOLEAN DEFAULT FALSE NOT NULL, + -- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the + -- time of the membership). + -- + -- Useful for the `include_old_rooms` functionality in the Sliding Sync API + tombstone_successor_room_id TEXT, + PRIMARY KEY (room_id) +); + +-- So we can purge rooms easily. 
+-- +-- The primary key is already `room_id` + +-- So we can sort by `stream_ordering` +CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_joined_rooms_event_stream_ordering ON sliding_sync_joined_rooms(event_stream_ordering); + +-- A table for storing a snapshot of room meta data (historical current state relevant +-- for sliding sync) at the time of a local user's membership. Only has rows for the +-- latest membership event for a given local user in a room which matches +-- `local_current_membership`. +-- +-- We store all memberships including joins. This makes it easy to reference this table +-- to find all memberships for a given user and shares the same semantics as +-- `local_current_membership`. And we get to avoid some table maintenance; if we only +-- stored non-joins, we would have to delete the row for the user when the user joins +-- the room. Stripped state doesn't include the `m.room.tombstone` event, so we just +-- assume that the room doesn't have a tombstone. +-- +-- For remote invites/knocks where the server is not participating in the room, we will +-- use stripped state events to populate this table. We assume that if any stripped +-- state is given, it will include all possible stripped state event types. For +-- example, if stripped state is given but `m.room.encryption` isn't included, we will +-- assume that the room is not encrypted. +-- +-- We don't include `bump_stamp` here because we can just use the `stream_ordering` from +-- the membership event itself as the `bump_stamp`. +CREATE TABLE IF NOT EXISTS sliding_sync_membership_snapshots( + room_id TEXT NOT NULL REFERENCES rooms(room_id), + user_id TEXT NOT NULL, + -- Useful to be able to tell leaves from kicks (where the `user_id` is different from the `sender`) + sender TEXT NOT NULL, + membership_event_id TEXT NOT NULL REFERENCES events(event_id), + membership TEXT NOT NULL, + -- This is an integer just to match `room_memberships` and also means we don't need + -- to do any casting. + forgotten INTEGER DEFAULT 0 NOT NULL, + -- `stream_ordering` of the `membership_event_id` + event_stream_ordering BIGINT NOT NULL REFERENCES events(stream_ordering), + -- `instance_name` of the worker that persisted the `membership_event_id`. + -- Useful for crafting `PersistedEventPosition(...)` + event_instance_name TEXT NOT NULL, + -- For remote invites/knocks that don't include any stripped state, we want to be + -- able to distinguish between a room with `None` as a valid value for some state and + -- a room where the state is completely unknown. Basically, this should be True unless + -- no stripped state was provided for a remote invite/knock (False). + has_known_state BOOLEAN DEFAULT FALSE NOT NULL, + -- `m.room.create` -> `content.type` (according to the current state at the time of + -- the membership). + -- + -- Useful for the `spaces`/`not_spaces` filter in the Sliding Sync API + room_type TEXT, + -- `m.room.name` -> `content.name` (according to the current state at the time of + -- the membership). + -- + -- Useful for the room meta data and `room_name_like` filter in the Sliding Sync API + room_name TEXT, + -- `m.room.encryption` -> `content.algorithm` (according to the current state at the + -- time of the membership). + -- + -- Useful for the `is_encrypted` filter in the Sliding Sync API + is_encrypted BOOLEAN DEFAULT FALSE NOT NULL, + -- `m.room.tombstone` -> `content.replacement_room` (according to the current state at the + -- time of the membership). 
+ -- + -- Useful for the `include_old_rooms` functionality in the Sliding Sync API + tombstone_successor_room_id TEXT, + PRIMARY KEY (room_id, user_id) +); + +-- So we can purge rooms easily. +-- +-- Since we're using a multi-column index as the primary key (room_id, user_id), the +-- first index column (room_id) is always usable for searching so we don't need to +-- create a separate index for it. +-- +-- CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_room_id ON sliding_sync_membership_snapshots(room_id); + +-- So we can fetch all rooms for a given user +CREATE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_user_id ON sliding_sync_membership_snapshots(user_id); +-- So we can sort by `stream_ordering` +CREATE UNIQUE INDEX IF NOT EXISTS sliding_sync_membership_snapshots_event_stream_ordering ON sliding_sync_membership_snapshots(event_stream_ordering); + + +-- Add a series of background updates to populate the new `sliding_sync_joined_rooms` table: +-- +-- 1. Add a background update to prefill `sliding_sync_joined_rooms_to_recalculate`. +-- We do a one-shot bulk insert from the `rooms` table to prefill. +-- 2. Add a background update to populate the new `sliding_sync_joined_rooms` table +-- based on the rooms listed in the `sliding_sync_joined_rooms_to_recalculate` +-- table. +-- +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8701, 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update', '{}'); +INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES + (8701, 'sliding_sync_joined_rooms_bg_update', '{}', 'sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update'); + +-- Add a background update to populate the new `sliding_sync_membership_snapshots` table +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8701, 'sliding_sync_membership_snapshots_bg_update', '{}'); diff --git a/synapse/storage/schema/main/delta/87/02_per_connection_state.sql b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql new file mode 100644
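An illustrative query (not taken from Synapse) showing what the `user_id` index above enables: fetching every unforgotten membership snapshot for a user in a single indexed lookup.

    def fetch_membership_snapshots_for_user(txn, user_id: str):
        # Served by the sliding_sync_membership_snapshots_user_id index; the
        # column list is a subset chosen for illustration.
        txn.execute(
            """
            SELECT room_id, membership, event_stream_ordering, room_type, is_encrypted
            FROM sliding_sync_membership_snapshots
            WHERE user_id = ? AND forgotten = 0
            """,
            (user_id,),
        )
        return txn.fetchall()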
index 0000000000..59bc14a2c9 --- /dev/null +++ b/synapse/storage/schema/main/delta/87/02_per_connection_state.sql
@@ -0,0 +1,81 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + + +-- Table to track active sliding sync connections. +-- +-- A new connection will be created for every sliding sync request without a +-- `since` token for a given `conn_id` for a device. +-- +-- Once a new connection is created and used, we delete all other connections for +-- the `conn_id`. +CREATE TABLE sliding_sync_connections( + connection_key $%AUTO_INCREMENT_PRIMARY_KEY%$, + user_id TEXT NOT NULL, + -- Generally the device ID, but may be something else for e.g. puppeted accounts. + effective_device_id TEXT NOT NULL, + conn_id TEXT NOT NULL, + created_ts BIGINT NOT NULL +); + +CREATE INDEX sliding_sync_connections_idx ON sliding_sync_connections(user_id, effective_device_id, conn_id); +CREATE INDEX sliding_sync_connections_ts_idx ON sliding_sync_connections(created_ts); + +-- We track per-connection state by associating changes to the state with +-- connection positions. This ensures that we correctly track state even if we +-- see retries of requests. +-- +-- If the client starts a "new" connection (by not specifying a since token), +-- we'll clear out the other connections (to ensure that we don't end up with +-- lots of connection keys). +CREATE TABLE sliding_sync_connection_positions( + connection_position $%AUTO_INCREMENT_PRIMARY_KEY%$, + connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE, + created_ts BIGINT NOT NULL +); + +CREATE INDEX sliding_sync_connection_positions_key ON sliding_sync_connection_positions(connection_key); +CREATE INDEX sliding_sync_connection_positions_ts_idx ON sliding_sync_connection_positions(created_ts); + + +-- To save space we deduplicate the `required_state` json by assigning IDs to +-- different values. +CREATE TABLE sliding_sync_connection_required_state( + required_state_id $%AUTO_INCREMENT_PRIMARY_KEY%$, + connection_key BIGINT NOT NULL REFERENCES sliding_sync_connections(connection_key) ON DELETE CASCADE, + required_state TEXT NOT NULL -- We store this as a json list of event type / state key tuples. +); + +CREATE INDEX sliding_sync_connection_required_state_conn_pos ON sliding_sync_connection_required_state(connection_key); + + +-- Stores the room configs we have seen for rooms in a connection. +CREATE TABLE sliding_sync_connection_room_configs( + connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE, + room_id TEXT NOT NULL, + timeline_limit BIGINT NOT NULL, + required_state_id BIGINT NOT NULL REFERENCES sliding_sync_connection_required_state(required_state_id) +); + +CREATE UNIQUE INDEX sliding_sync_connection_room_configs_idx ON sliding_sync_connection_room_configs(connection_position, room_id); + +-- Stores what data we have sent for given streams down given connections. +CREATE TABLE sliding_sync_connection_streams( + connection_position BIGINT NOT NULL REFERENCES sliding_sync_connection_positions(connection_position) ON DELETE CASCADE, + stream TEXT NOT NULL, -- e.g. 
"events" or "receipts" + room_id TEXT NOT NULL, + room_status TEXT NOT NULL, -- "live" or "previously", i.e. the `HaveSentRoomFlag` value + last_token TEXT -- For "previously" the token for the stream we have sent up to. +); + +CREATE UNIQUE INDEX sliding_sync_connection_streams_idx ON sliding_sync_connection_streams(connection_position, room_id, stream); diff --git a/synapse/storage/schema/main/delta/87/03_current_state_index.sql b/synapse/storage/schema/main/delta/87/03_current_state_index.sql new file mode 100644
index 0000000000..76b974271c --- /dev/null +++ b/synapse/storage/schema/main/delta/87/03_current_state_index.sql
@@ -0,0 +1,19 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + + +-- Add a background update to add a new index: +-- `current_state_events(room_id, membership) WHERE type = 'm.room.member'` +-- This makes counting membership in rooms (for syncs) much faster. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8701, 'current_state_events_members_room_index', '{}'); diff --git a/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql new file mode 100644
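A sketch of the kind of query the partial index serves (illustrative, not a Synapse function). Because the index is filtered on `type = 'm.room.member'`, a query must repeat that predicate for the planner to be able to use it.

    def count_members_by_membership(txn, room_id: str):
        txn.execute(
            """
            SELECT membership, COUNT(*)
            FROM current_state_events
            WHERE room_id = ? AND type = 'm.room.member'
            GROUP BY membership
            """,
            (room_id,),
        )
        return dict(txn.fetchall())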
index 0000000000..78ba5129af --- /dev/null +++ b/synapse/storage/schema/main/delta/88/01_add_delayed_events.sql
@@ -0,0 +1,43 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +CREATE TABLE delayed_events ( + delay_id TEXT NOT NULL, + user_localpart TEXT NOT NULL, + device_id TEXT, + delay BIGINT NOT NULL, + send_ts BIGINT NOT NULL, + room_id TEXT NOT NULL, + event_type TEXT NOT NULL, + state_key TEXT, + origin_server_ts BIGINT, + content bytea NOT NULL, + is_processed BOOLEAN NOT NULL DEFAULT FALSE, + PRIMARY KEY (user_localpart, delay_id) +); + +CREATE INDEX delayed_events_send_ts ON delayed_events (send_ts); +CREATE INDEX delayed_events_is_processed ON delayed_events (is_processed); +CREATE INDEX delayed_events_room_state_event_idx ON delayed_events (room_id, event_type, state_key) WHERE state_key IS NOT NULL; + +CREATE TABLE delayed_events_stream_pos ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. + stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +-- Start processing events from the point this migration was run, rather +-- than the beginning of time. +INSERT INTO delayed_events_stream_pos ( + stream_id +) SELECT COALESCE(MAX(stream_ordering), 0) from events; diff --git a/synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql b/synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql new file mode 100644
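A hedged sketch of how a scheduler might fill in a `delayed_events` row (the helper and its defaults are invented): `send_ts` is simply now plus the delay, and `is_processed` starts false so that the send loop can find due rows via the `send_ts` index.

    import time

    def make_delayed_event_row(
        delay_id: str,
        user_localpart: str,
        room_id: str,
        event_type: str,
        delay_ms: int,
        content: bytes,
    ) -> dict:
        now_ms = int(time.time() * 1000)
        return {
            "delay_id": delay_id,
            "user_localpart": user_localpart,
            "device_id": None,
            "delay": delay_ms,
            "send_ts": now_ms + delay_ms,  # when the event becomes due
            "room_id": room_id,
            "event_type": event_type,
            "state_key": None,  # a non-state event in this example
            "origin_server_ts": None,
            "content": content,
            "is_processed": False,
        }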
index 0000000000..63cbd7ffa9 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/01_custom_profile_fields.sql
@@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 Patrick Cloke +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Custom profile fields. +ALTER TABLE profiles ADD COLUMN fields JSONB; diff --git a/synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql b/synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql new file mode 100644
index 0000000000..4de46af2fc --- /dev/null +++ b/synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql
@@ -0,0 +1,21 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Add a background update to update the `sliding_sync_membership_snapshots` -> +-- `forgotten` column to be in sync with the `room_memberships` table. +-- +-- For any room that someone has forgotten and subsequently re-joined or had any new +-- membership on, we need to go and update the column to match the `room_memberships` +-- table as it has fallen out of sync. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8802, 'sliding_sync_membership_snapshots_fix_forgotten_column_bg_update', '{}'); diff --git a/synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql b/synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql new file mode 100644
index 0000000000..7712ea68ad --- /dev/null +++ b/synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql
@@ -0,0 +1,18 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + + +-- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can +-- efficiently be issued in the same order they were uploaded. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8803, 'add_otk_ts_added_index', '{}'); diff --git a/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql b/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql new file mode 100644
index 0000000000..0ee78df1a0 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql
@@ -0,0 +1,18 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + + +-- Add an index on `current_state_delta_stream(room_id, stream_id)` to allow +-- efficient per-room lookups. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8804, 'current_state_delta_stream_room_index', '{}'); diff --git a/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres new file mode 100644
index 0000000000..93a68836ee --- /dev/null +++ b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres
@@ -0,0 +1,19 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility +-- that it could still have old OTKs that the client has dropped. +-- +-- We create a scheduled task which will drop old OTKs, to flush them out. +INSERT INTO scheduled_tasks(id, action, status, timestamp) + VALUES ('delete_old_otks_task', 'delete_old_otks', 'scheduled', extract(epoch from current_timestamp) * 1000); diff --git a/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite new file mode 100644
index 0000000000..cdc2b5d211 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite
@@ -0,0 +1,19 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility +-- that it could still have old OTKs that the client has dropped. +-- +-- We create a scheduled task which will drop old OTKs, to flush them out. +INSERT INTO scheduled_tasks(id, action, status, timestamp) + VALUES ('delete_old_otks_task', 'delete_old_otks', 'scheduled', strftime('%s', 'now') * 1000); diff --git a/synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql b/synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql new file mode 100644
index 0000000000..7b2e18a84b --- /dev/null +++ b/synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql
@@ -0,0 +1,20 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + + +-- Add an index on sliding_sync_connection_room_configs(required_state_id), so +-- that when we delete entries in `sliding_sync_connection_required_state` it's +-- efficient for Postgres to check they've been deleted from +-- `sliding_sync_connection_room_configs` too +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8805, 'sliding_sync_connection_room_configs_required_state_id_idx', '{}'); diff --git a/synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql b/synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql new file mode 100644
index 0000000000..d70a4a8dbc --- /dev/null +++ b/synapse/storage/schema/main/delta/88/06_events_received_ts_index.sql
@@ -0,0 +1,17 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Add an index on `events.received_ts` for `m.room.member` events to allow for +-- efficient lookup of events by timestamp in some Admin API's +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8806, 'events_received_ts_index', '{}'); diff --git a/synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql b/synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql new file mode 100644
index 0000000000..7799cffdce --- /dev/null +++ b/synapse/storage/schema/main/delta/89/01_sliding_sync_membership_snapshot_index.sql
@@ -0,0 +1,15 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2025 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8901, 'sliding_sync_membership_snapshots_membership_event_id_idx', '{}'); diff --git a/synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql b/synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql new file mode 100644
index 0000000000..dafd046499 --- /dev/null +++ b/synapse/storage/schema/main/delta/90/01_add_column_participant_room_memberships_table.sql
@@ -0,0 +1,16 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2025 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Add a column `participant` to `room_memberships` table to track whether a room member has sent +-- a `m.room.message` or `m.room.encrypted` event into a room they are a member of +ALTER TABLE room_memberships ADD COLUMN participant BOOLEAN DEFAULT FALSE; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/91/01_media_hash.sql b/synapse/storage/schema/main/delta/91/01_media_hash.sql new file mode 100644
index 0000000000..34a372f1ed --- /dev/null +++ b/synapse/storage/schema/main/delta/91/01_media_hash.sql
@@ -0,0 +1,28 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2025 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- <https://www.gnu.org/licenses/agpl-3.0.html>. + +-- Store the SHA256 content hash of media files. +ALTER TABLE local_media_repository ADD COLUMN sha256 TEXT; +ALTER TABLE remote_media_cache ADD COLUMN sha256 TEXT; + +-- Add background updates to create the new indexes. +-- +-- Note that the ordering of the updates does not follow the usual scheme. This +-- is because when upgrading from Synapse 1.127, these indexes are fairly important +-- to have up quickly, so that they don't tank performance, which is why they are +-- scheduled before other background updates in the 1.127 -> 1.128 upgrade. +INSERT INTO + background_updates (ordering, update_name, progress_json) +VALUES + (8890, 'local_media_repository_sha256_idx', '{}'), + (8891, 'remote_media_cache_sha256_idx', '{}'); diff --git a/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres b/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres new file mode 100644
index 0000000000..e9f160cdcc --- /dev/null +++ b/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres
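The new columns hold the SHA-256 digest of each media file. Computing it in a streaming fashion keeps large uploads out of memory; a small self-contained sketch (hypothetical helper, not Synapse's actual media code):

    import hashlib

    def sha256_of_file(path: str) -> str:
        # Hash in 64 KiB chunks so a large media file is never read whole.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(64 * 1024), b""):
                digest.update(chunk)
        return digest.hexdigest()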
diff --git a/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres b/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres
new file mode 100644
index 0000000000..e9f160cdcc
--- /dev/null
+++ b/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.postgres
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Removes the trigger that was added in #18260 and then reverted
+DROP TRIGGER IF EXISTS event_stats_increment_counts_trigger ON events;
+DROP FUNCTION IF EXISTS event_stats_increment_counts();
diff --git a/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite b/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite
new file mode 100644
index 0000000000..b5f084dde8
--- /dev/null
+++ b/synapse/storage/schema/main/delta/92/01_remove_trigger.sql.sqlite
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Removes the trigger that was added in #18260 and then reverted
+DROP TRIGGER IF EXISTS event_stats_events_insert_trigger;
+DROP TRIGGER IF EXISTS event_stats_events_delete_trigger;
diff --git a/synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql b/synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql
new file mode 100644
index 0000000000..e1f377c37d
--- /dev/null
+++ b/synapse/storage/schema/main/delta/92/02_remove_populate_participant_bg_update.sql
@@ -0,0 +1,17 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Remove the background update if it was scheduled, as it is not rollback-safe
+-- See https://github.com/element-hq/synapse/issues/18356 for context
+DELETE FROM background_updates
+WHERE update_name = 'populate_participant_bg_update';
\ No newline at end of file
diff --git a/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql b/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql
new file mode 100644
index 0000000000..6f5b7cb06e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/92/04_ss_membership_snapshot_idx.sql
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- So we can fetch all rooms for a given user sorted by stream order
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (9204, 'sliding_sync_membership_snapshots_user_id_stream_ordering', '{}');
diff --git a/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql b/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql
new file mode 100644
index 0000000000..c1ebf8b58b
--- /dev/null
+++ b/synapse/storage/schema/main/delta/92/05_fixup_max_depth_cap.sql
@@ -0,0 +1,17 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Background update that fixes any events with a topological ordering above the
+-- MAX_DEPTH value.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (9205, 'fixup_max_depth_cap', '{}');
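As with the other rows inserted into `background_updates`, `fixup_max_depth_cap` names a batched handler registered in the datastore. Roughly the shape such a handler takes; the body below is a simplified assumption about how the clamping might look, not the real implementation:

    from synapse.api.constants import MAX_DEPTH

    async def _fixup_max_depth_cap(self, progress: dict, batch_size: int) -> int:
        def fixup_txn(txn) -> int:
            # Find a batch of events whose depth exceeds the cap and clamp them.
            txn.execute(
                "SELECT event_id FROM events WHERE depth > ? LIMIT ?",
                (MAX_DEPTH, batch_size),
            )
            event_ids = [row[0] for row in txn.fetchall()]
            for event_id in event_ids:
                txn.execute(
                    "UPDATE events SET depth = ? WHERE event_id = ?",
                    (MAX_DEPTH, event_id),
                )
            return len(event_ids)

        num_fixed = await self.db_pool.runInteraction("fixup_max_depth_cap", fixup_txn)
        if num_fixed < batch_size:
            # Nothing left above the cap, so mark the update as complete.
            await self.db_pool.updates._end_background_update("fixup_max_depth_cap")
        return num_fixed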
diff --git a/synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql b/synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql
new file mode 100644
index 0000000000..d4cb27a3a2
--- /dev/null
+++ b/synapse/storage/schema/state/delta/89/01_state_groups_deletion.sql
@@ -0,0 +1,39 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- See the `StateDeletionDataStore` for details of these tables.
+
+-- We add state groups to this table when we want to later delete them. The
+-- `insertion_ts` column indicates when the state group was proposed for
+-- deletion (rather than when it should be deleted).
+CREATE TABLE IF NOT EXISTS state_groups_pending_deletion (
+    sequence_number $%AUTO_INCREMENT_PRIMARY_KEY%$,
+    state_group BIGINT NOT NULL,
+    insertion_ts BIGINT NOT NULL
+);
+
+CREATE UNIQUE INDEX state_groups_pending_deletion_state_group ON state_groups_pending_deletion(state_group);
+CREATE INDEX state_groups_pending_deletion_insertion_ts ON state_groups_pending_deletion(insertion_ts);
+
+
+-- Holds the state groups the worker is currently persisting.
+--
+-- The `sequence_number` column of the `state_groups_pending_deletion` table
+-- *must* be updated whenever a state group may have become referenced.
+CREATE TABLE IF NOT EXISTS state_groups_persisting (
+    state_group BIGINT NOT NULL,
+    instance_name TEXT NOT NULL,
+    PRIMARY KEY (state_group, instance_name)
+);
+
+CREATE INDEX state_groups_persisting_instance_name ON state_groups_persisting(instance_name);
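The two tables work together: a state group marked in `state_groups_pending_deletion` may only be reaped once it has sat there long enough and no row in `state_groups_persisting` indicates a worker might still be attaching events to it. A condensed sketch of that check (names and the grace-period handling are illustrative):

    def is_safe_to_delete_txn(txn, state_group: int, now_ms: int, grace_ms: int) -> bool:
        # The group must have been pending deletion for longer than the grace period...
        txn.execute(
            "SELECT insertion_ts FROM state_groups_pending_deletion WHERE state_group = ?",
            (state_group,),
        )
        row = txn.fetchone()
        if row is None or now_ms - row[0] < grace_ms:
            return False
        # ...and no worker may currently be persisting an event that could
        # re-reference it.
        txn.execute(
            "SELECT 1 FROM state_groups_persisting WHERE state_group = ?",
            (state_group,),
        )
        return txn.fetchone() is None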
diff --git a/synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql b/synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql
new file mode 100644
index 0000000000..55a038e2b8
--- /dev/null
+++ b/synapse/storage/schema/state/delta/90/02_delete_unreferenced_state_groups.sql
@@ -0,0 +1,16 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Add a background update to delete any unreferenced state groups
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (9002, 'mark_unreferenced_state_groups_for_deletion_bg_update', '{}');
diff --git a/synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql b/synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql
new file mode 100644
index 0000000000..1cc6d612b6
--- /dev/null
+++ b/synapse/storage/schema/state/delta/90/03_remove_old_deletion_bg_update.sql
@@ -0,0 +1,15 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+--
+-- Copyright (C) 2025 New Vector, Ltd
+--
+-- This program is free software: you can redistribute it and/or modify
+-- it under the terms of the GNU Affero General Public License as
+-- published by the Free Software Foundation, either version 3 of the
+-- License, or (at your option) any later version.
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Remove the old unreferenced state group deletion background update if it exists
+DELETE FROM background_updates WHERE update_name = 'delete_unreferenced_state_groups_bg_update';
diff --git a/synapse/storage/types.py b/synapse/storage/types.py
index 74f60cc590..4329d88c9a 100644
--- a/synapse/storage/types.py
+++ b/synapse/storage/types.py
@@ -26,14 +26,13 @@ from typing import (
     List,
     Mapping,
     Optional,
+    Protocol,
     Sequence,
     Tuple,
     Type,
     Union,
 )
 
-from typing_extensions import Protocol
-
 """
 Some very basic protocol definitions for the DB-API2 classes specified in PEP-249
 """
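`typing.Protocol` has been part of the standard library since Python 3.8, so the `typing_extensions` fallback can go. The DB-API2 definitions in this module are structural types along these lines (a sketch of the style, not the module's exact contents):

    from typing import Any, Optional, Protocol

    class Cursor(Protocol):
        # Anything providing these methods can be used where a Cursor is
        # expected; no common base class is required.
        def execute(self, sql: str, parameters: Any = ...) -> Any: ...
        def fetchone(self) -> Optional[Any]: ...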
diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi
index 1682d0d151..7d3422572d 100644
--- a/synapse/synapse_rust/events.pyi
+++ b/synapse/synapse_rust/events.pyi
@@ -10,7 +10,7 @@
 # See the GNU Affero General Public License for more details:
 # <https://www.gnu.org/licenses/agpl-3.0.html>.
 
-from typing import Optional
+from typing import List, Mapping, Optional, Tuple
 
 from synapse.types import JsonDict
 
@@ -105,3 +105,29 @@ class EventInternalMetadata:
 
     def is_notifiable(self) -> bool:
         """Whether this event can trigger a push notification"""
+
+def event_visible_to_server(
+    sender: str,
+    target_server_name: str,
+    history_visibility: str,
+    erased_senders: Mapping[str, bool],
+    partial_state_invisible: bool,
+    memberships: List[Tuple[str, str]],
+) -> bool:
+    """Determine whether the server is allowed to see the unredacted event.
+
+    Args:
+        sender: The sender of the event.
+        target_server_name: The server we want to send the event to.
+        history_visibility: The history_visibility value at the event.
+        erased_senders: A mapping of users and whether they have requested erasure. If a
+            user is not in the map, it is treated as though they haven't requested erasure.
+        partial_state_invisible: Whether the event should be treated as invisible due to
+            the partial state status of the room.
+        memberships: A list of membership state information at the event for users
+            matching the `target_server_name`. Each list item must contain a tuple of
+            (state_key, membership).
+
+    Returns:
+        Whether the server is allowed to see the unredacted event.
+    """
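A usage sketch for the new stub, with illustrative values (the function is implemented in the Rust extension; the signature is as documented above):

    from synapse.synapse_rust.events import event_visible_to_server

    visible = event_visible_to_server(
        sender="@alice:example.org",
        target_server_name="remote.example.org",
        history_visibility="shared",
        erased_senders={"@alice:example.org": False},
        partial_state_invisible=False,
        # (state_key, membership) pairs for users on remote.example.org
        memberships=[("@bob:remote.example.org", "join")],
    )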
diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi
index 27a974e1bb..3f317c3288 100644
--- a/synapse/synapse_rust/push.pyi
+++ b/synapse/synapse_rust/push.pyi
@@ -48,6 +48,7 @@ class FilteredPushRules:
         msc3381_polls_enabled: bool,
         msc3664_enabled: bool,
         msc4028_push_encrypted_events: bool,
+        msc4210_enabled: bool,
     ): ...
     def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
 
@@ -65,6 +66,7 @@ class PushRuleEvaluator:
         related_event_match_enabled: bool,
         room_version_feature_flags: Tuple[str, ...],
         msc3931_enabled: bool,
+        msc4210_enabled: bool,
     ): ...
     def run(
         self,
diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py
index 5259550f1c..5549f3c9f8 100644
--- a/synapse/types/__init__.py
+++ b/synapse/types/__init__.py
@@ -40,6 +40,7 @@ from typing import (
     Set,
     Tuple,
     Type,
+    TypedDict,
     TypeVar,
     Union,
     overload,
@@ -49,7 +50,7 @@ import attr
 from immutabledict import immutabledict
 from signedjson.key import decode_verify_key_bytes
 from signedjson.types import VerifyKey
-from typing_extensions import Self, TypedDict
+from typing_extensions import Self
 from unpaddedbase64 import decode_base64
 from zope.interface import Interface
 
@@ -664,6 +665,11 @@ class RoomStreamToken(AbstractMultiWriterStreamToken):
 
     @classmethod
     async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken":
+        # Check that it looks like a Synapse token first. We do this so that
+        # we don't log at the exception-level for obviously incorrect tokens.
+        if not string or string[0] not in ("s", "t", "m"):
+            raise SynapseError(400, f"Invalid room stream token {string!r}")
+
         try:
             if string[0] == "s":
                 return cls(topological=None, stream=int(string[1:]))
@@ -883,8 +889,7 @@ class MultiWriterStreamToken(AbstractMultiWriterStreamToken):
 
     def __str__(self) -> str:
         instances = ", ".join(f"{k}: {v}" for k, v in sorted(self.instance_map.items()))
         return (
-            f"MultiWriterStreamToken(stream: {self.stream}, "
-            f"instances: {{{instances}}})"
+            f"MultiWriterStreamToken(stream: {self.stream}, instances: {{{instances}}})"
         )
 
@@ -1308,7 +1313,7 @@ class DeviceListUpdates:
 
 
 def get_verify_key_from_cross_signing_key(
-    key_info: Mapping[str, Any]
+    key_info: Mapping[str, Any],
 ) -> Tuple[str, VerifyKey]:
     """Get the key ID and signedjson verify key from a cross-signing key dict
 
diff --git a/synapse/types/handlers/__init__.py b/synapse/types/handlers/__init__.py
index 363f060bef..f2fbc1dddf 100644
--- a/synapse/types/handlers/__init__.py
+++ b/synapse/types/handlers/__init__.py
@@ -17,33 +17,23 @@ # [This file includes modifications made by New Vector Limited] # # -from enum import Enum -from typing import TYPE_CHECKING, Dict, Final, List, Mapping, Optional, Sequence, Tuple -import attr -from typing_extensions import TypedDict -from synapse._pydantic_compat import HAS_PYDANTIC_V2 +from typing import List, Optional, TypedDict -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import Extra -else: - from pydantic import Extra +from synapse.api.constants import EventTypes -from synapse.events import EventBase -from synapse.types import ( - DeviceListUpdates, - JsonDict, - JsonMapping, - Requester, - SlidingSyncStreamToken, - StreamToken, - UserID, -) -from synapse.types.rest.client import SlidingSyncBody - -if TYPE_CHECKING: - from synapse.handlers.relations import BundledAggregations +# Sliding Sync: The event types that clients should consider as new activity and affect +# the `bump_stamp` +SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES = { + EventTypes.Create, + EventTypes.Message, + EventTypes.Encrypted, + EventTypes.Sticker, + EventTypes.CallInvite, + EventTypes.PollStart, + EventTypes.LiveLocationShareStart, +} class ShutdownRoomParams(TypedDict): @@ -101,331 +91,3 @@ class ShutdownRoomResponse(TypedDict): failed_to_kick_users: List[str] local_aliases: List[str] new_room_id: Optional[str] - - -class SlidingSyncConfig(SlidingSyncBody): - """ - Inherit from `SlidingSyncBody` since we need all of the same fields and add a few - extra fields that we need in the handler - """ - - user: UserID - requester: Requester - - # Pydantic config - class Config: - # By default, ignore fields that we don't recognise. - extra = Extra.ignore - # By default, don't allow fields to be reassigned after parsing. - allow_mutation = False - # Allow custom types like `UserID` to be used in the model - arbitrary_types_allowed = True - - -class OperationType(Enum): - """ - Represents the operation types in a Sliding Sync window. - - Attributes: - SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about - entries in this range. - INSERT: Sets a single entry. If the position is not empty then clients MUST move - entries to the left or the right depending on where the closest empty space is. - DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move - places. - INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for - offline support, but they should be treated as empty when additional operations - which concern indexes in the range arrive from the server. - """ - - SYNC: Final = "SYNC" - INSERT: Final = "INSERT" - DELETE: Final = "DELETE" - INVALIDATE: Final = "INVALIDATE" - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class SlidingSyncResult: - """ - The Sliding Sync result to be serialized to JSON for a response. - - Attributes: - next_pos: The next position token in the sliding window to request (next_batch). - lists: Sliding window API. A map of list key to list results. - rooms: Room subscription API. A map of room ID to room results. - extensions: Extensions API. A map of extension key to extension results. - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class RoomResult: - """ - Attributes: - name: Room name or calculated room name. - avatar: Room avatar - heroes: List of stripped membership events (containing `user_id` and optionally - `avatar_url` and `displayname`) for the users used to calculate the room name. 
- is_dm: Flag to specify whether the room is a direct-message room (most likely - between two people). - initial: Flag which is set when this is the first time the server is sending this - data on this connection. Clients can use this flag to replace or update - their local state. When there is an update, servers MUST omit this flag - entirely and NOT send "initial":false as this is wasteful on bandwidth. The - absence of this flag means 'false'. - required_state: The current state of the room - timeline: Latest events in the room. The last event is the most recent. - bundled_aggregations: A mapping of event ID to the bundled aggregations for - the timeline events above. This allows clients to show accurate reaction - counts (or edits, threads), even if some of the reaction events were skipped - over in a gappy sync. - stripped_state: Stripped state events (for rooms where the usre is - invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2, - absent on joined/left rooms - prev_batch: A token that can be passed as a start parameter to the - `/rooms/<room_id>/messages` API to retrieve earlier messages. - limited: True if there are more events than `timeline_limit` looking - backwards from the `response.pos` to the `request.pos`. - num_live: The number of timeline events which have just occurred and are not historical. - The last N events are 'live' and should be treated as such. This is mostly - useful to determine whether a given @mention event should make a noise or not. - Clients cannot rely solely on the absence of `initial: true` to determine live - events because if a room not in the sliding window bumps into the window because - of an @mention it will have `initial: true` yet contain a single live event - (with potentially other old events in the timeline). - bump_stamp: The `stream_ordering` of the last event according to the - `bump_event_types`. This helps clients sort more readily without them - needing to pull in a bunch of the timeline to determine the last activity. - `bump_event_types` is a thing because for example, we don't want display - name changes to mark the room as unread and bump it to the top. For - encrypted rooms, we just have to consider any activity as a bump because we - can't see the content and the client has to figure it out for themselves. - joined_count: The number of users with membership of join, including the client's - own user ID. (same as sync `v2 m.joined_member_count`) - invited_count: The number of users with membership of invite. (same as sync v2 - `m.invited_member_count`) - notification_count: The total number of unread notifications for this room. (same - as sync v2) - highlight_count: The number of unread notifications for this room with the highlight - flag set. 
(same as sync v2) - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class StrippedHero: - user_id: str - display_name: Optional[str] - avatar_url: Optional[str] - - name: Optional[str] - avatar: Optional[str] - heroes: Optional[List[StrippedHero]] - is_dm: bool - initial: bool - # Should be empty for invite/knock rooms with `stripped_state` - required_state: List[EventBase] - # Should be empty for invite/knock rooms with `stripped_state` - timeline_events: List[EventBase] - bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] - # Optional because it's only relevant to invite/knock rooms - stripped_state: List[JsonDict] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - prev_batch: Optional[StreamToken] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - limited: Optional[bool] - # Only optional because it won't be included for invite/knock rooms with `stripped_state` - num_live: Optional[int] - bump_stamp: int - joined_count: int - invited_count: int - notification_count: int - highlight_count: int - - def __bool__(self) -> bool: - return ( - # If this is the first time the client is seeing the room, we should not filter it out - # under any circumstance. - self.initial - # We need to let the client know if there are any new events - or bool(self.required_state) - or bool(self.timeline_events) - or bool(self.stripped_state) - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class SlidingWindowList: - """ - Attributes: - count: The total number of entries in the list. Always present if this list - is. - ops: The sliding list operations to perform. - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class Operation: - """ - Attributes: - op: The operation type to perform. - range: Which index positions are affected by this operation. These are - both inclusive. - room_ids: Which room IDs are affected by this operation. These IDs match - up to the positions in the `range`, so the last room ID in this list - matches the 9th index. The room data is held in a separate object. - """ - - op: OperationType - range: Tuple[int, int] - room_ids: List[str] - - count: int - ops: List[Operation] - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class Extensions: - """Responses for extensions - - Attributes: - to_device: The to-device extension (MSC3885) - e2ee: The E2EE device extension (MSC3884) - """ - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class ToDeviceExtension: - """The to-device extension (MSC3885) - - Attributes: - next_batch: The to-device stream token the client should use - to get more results - events: A list of to-device messages for the client - """ - - next_batch: str - events: Sequence[JsonMapping] - - def __bool__(self) -> bool: - return bool(self.events) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class E2eeExtension: - """The E2EE device extension (MSC3884) - - Attributes: - device_list_updates: List of user_ids whose devices have changed or left (only - present on incremental syncs). - device_one_time_keys_count: Map from key algorithm to the number of - unclaimed one-time keys currently held on the server for this device. If - an algorithm is unlisted, the count for that algorithm is assumed to be - zero. If this entire parameter is missing, the count for all algorithms - is assumed to be zero. - device_unused_fallback_key_types: List of unused fallback key algorithms - for this device. 
- """ - - # Only present on incremental syncs - device_list_updates: Optional[DeviceListUpdates] - device_one_time_keys_count: Mapping[str, int] - device_unused_fallback_key_types: Sequence[str] - - def __bool__(self) -> bool: - # Note that "signed_curve25519" is always returned in key count responses - # regardless of whether we uploaded any keys for it. This is necessary until - # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. - # - # Also related: - # https://github.com/element-hq/element-android/issues/3725 and - # https://github.com/matrix-org/synapse/issues/10456 - default_otk = self.device_one_time_keys_count.get("signed_curve25519") - more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( - default_otk is not None and default_otk > 0 - ) - - return bool( - more_than_default_otk - or self.device_list_updates - or self.device_unused_fallback_key_types - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class AccountDataExtension: - """The Account Data extension (MSC3959) - - Attributes: - global_account_data_map: Mapping from `type` to `content` of global account - data events. - account_data_by_room_map: Mapping from room_id to mapping of `type` to - `content` of room account data events. - """ - - global_account_data_map: Mapping[str, JsonMapping] - account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] - - def __bool__(self) -> bool: - return bool( - self.global_account_data_map or self.account_data_by_room_map - ) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class ReceiptsExtension: - """The Receipts extension (MSC3960) - - Attributes: - room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral - event (type, content) - """ - - room_id_to_receipt_map: Mapping[str, JsonMapping] - - def __bool__(self) -> bool: - return bool(self.room_id_to_receipt_map) - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class TypingExtension: - """The Typing Notification extension (MSC3961) - - Attributes: - room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral - event (type, content) - """ - - room_id_to_typing_map: Mapping[str, JsonMapping] - - def __bool__(self) -> bool: - return bool(self.room_id_to_typing_map) - - to_device: Optional[ToDeviceExtension] = None - e2ee: Optional[E2eeExtension] = None - account_data: Optional[AccountDataExtension] = None - receipts: Optional[ReceiptsExtension] = None - typing: Optional[TypingExtension] = None - - def __bool__(self) -> bool: - return bool( - self.to_device - or self.e2ee - or self.account_data - or self.receipts - or self.typing - ) - - next_pos: SlidingSyncStreamToken - lists: Dict[str, SlidingWindowList] - rooms: Dict[str, RoomResult] - extensions: Extensions - - def __bool__(self) -> bool: - """Make the result appear empty if there are no updates. This is used - to tell if the notifier needs to wait for more events when polling for - events. - """ - # We don't include `self.lists` here, as a) `lists` is always non-empty even if - # there are no changes, and b) since we're sorting rooms by `stream_ordering` of - # the latest activity, anything that would cause the order to change would end - # up in `self.rooms` and cause us to send down the change. 
- return bool(self.rooms or self.extensions) - - @staticmethod - def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult": - "Return a new empty result" - return SlidingSyncResult( - next_pos=next_pos, - lists={}, - rooms={}, - extensions=SlidingSyncResult.Extensions(), - ) diff --git a/synapse/types/handlers/policy_server.py b/synapse/types/handlers/policy_server.py new file mode 100644
index 0000000000..bfc09dabf4
--- /dev/null
+++ b/synapse/types/handlers/policy_server.py
@@ -0,0 +1,16 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+RECOMMENDATION_OK = "ok"
+RECOMMENDATION_SPAM = "spam"
diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py
new file mode 100644
index 0000000000..3ebd334a6d
--- /dev/null
+++ b/synapse/types/handlers/sliding_sync.py
@@ -0,0 +1,875 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +import logging +import typing +from collections import ChainMap +from enum import Enum +from typing import ( + TYPE_CHECKING, + AbstractSet, + Any, + Callable, + Dict, + Final, + Generic, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Set, + Tuple, + TypeVar, + cast, +) + +import attr + +from synapse._pydantic_compat import Extra +from synapse.api.constants import EventTypes +from synapse.events import EventBase +from synapse.types import ( + DeviceListUpdates, + JsonDict, + JsonMapping, + MultiWriterStreamToken, + Requester, + RoomStreamToken, + SlidingSyncStreamToken, + StrCollection, + StreamToken, + UserID, +) +from synapse.types.rest.client import SlidingSyncBody + +if TYPE_CHECKING: + from synapse.handlers.relations import BundledAggregations + +logger = logging.getLogger(__name__) + + +class SlidingSyncConfig(SlidingSyncBody): + """ + Inherit from `SlidingSyncBody` since we need all of the same fields and add a few + extra fields that we need in the handler + """ + + user: UserID + requester: Requester + + # Pydantic config + class Config: + # By default, ignore fields that we don't recognise. + extra = Extra.ignore + # By default, don't allow fields to be reassigned after parsing. + allow_mutation = False + # Allow custom types like `UserID` to be used in the model + arbitrary_types_allowed = True + + +class OperationType(Enum): + """ + Represents the operation types in a Sliding Sync window. + + Attributes: + SYNC: Sets a range of entries. Clients SHOULD discard what they previous knew about + entries in this range. + INSERT: Sets a single entry. If the position is not empty then clients MUST move + entries to the left or the right depending on where the closest empty space is. + DELETE: Remove a single entry. Often comes before an INSERT to allow entries to move + places. + INVALIDATE: Remove a range of entries. Clients MAY persist the invalidated range for + offline support, but they should be treated as empty when additional operations + which concern indexes in the range arrive from the server. + """ + + SYNC: Final = "SYNC" + INSERT: Final = "INSERT" + DELETE: Final = "DELETE" + INVALIDATE: Final = "INVALIDATE" + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SlidingSyncResult: + """ + The Sliding Sync result to be serialized to JSON for a response. + + Attributes: + next_pos: The next position token in the sliding window to request (next_batch). + lists: Sliding window API. A map of list key to list results. + rooms: Room subscription API. A map of room ID to room results. + extensions: Extensions API. A map of extension key to extension results. + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class RoomResult: + """ + Attributes: + name: Room name or calculated room name. + avatar: Room avatar + heroes: List of stripped membership events (containing `user_id` and optionally + `avatar_url` and `displayname`) for the users used to calculate the room name. 
+            is_dm: Flag to specify whether the room is a direct-message room (most likely
+                between two people).
+            initial: Flag which is set when this is the first time the server is sending this
+                data on this connection. Clients can use this flag to replace or update
+                their local state. When there is an update, servers MUST omit this flag
+                entirely and NOT send "initial":false as this is wasteful on bandwidth. The
+                absence of this flag means 'false'.
+            unstable_expanded_timeline: Flag which is set if we're returning more historic
+                events due to the timeline limit having increased. See "XXX: Odd behavior"
+                comment in `synapse.handlers.sliding_sync`.
+            required_state: The current state of the room
+            timeline: Latest events in the room. The last event is the most recent.
+            bundled_aggregations: A mapping of event ID to the bundled aggregations for
+                the timeline events above. This allows clients to show accurate reaction
+                counts (or edits, threads), even if some of the reaction events were skipped
+                over in a gappy sync.
+            stripped_state: Stripped state events (for rooms where the user is
+                invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
+                absent on joined/left rooms
+            prev_batch: A token that can be passed as a start parameter to the
+                `/rooms/<room_id>/messages` API to retrieve earlier messages.
+            limited: True if there are more events than `timeline_limit` looking
+                backwards from the `response.pos` to the `request.pos`.
+            num_live: The number of timeline events which have just occurred and are not historical.
+                The last N events are 'live' and should be treated as such. This is mostly
+                useful to determine whether a given @mention event should make a noise or not.
+                Clients cannot rely solely on the absence of `initial: true` to determine live
+                events because if a room not in the sliding window bumps into the window because
+                of an @mention it will have `initial: true` yet contain a single live event
+                (with potentially other old events in the timeline).
+            bump_stamp: The `stream_ordering` of the last event according to the
+                `bump_event_types`. This helps clients sort more readily without them
+                needing to pull in a bunch of the timeline to determine the last activity.
+                `bump_event_types` is a thing because for example, we don't want display
+                name changes to mark the room as unread and bump it to the top. For
+                encrypted rooms, we just have to consider any activity as a bump because we
+                can't see the content and the client has to figure it out for themselves.
+                This may not be included if there hasn't been a change.
+            joined_count: The number of users with membership of join, including the client's
+                own user ID. (same as sync v2 `m.joined_member_count`)
+            invited_count: The number of users with membership of invite. (same as sync v2
+                `m.invited_member_count`)
+            notification_count: The total number of unread notifications for this room. (same
+                as sync v2)
+            highlight_count: The number of unread notifications for this room with the highlight
+                flag set.
(same as sync v2) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class StrippedHero: + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] + + name: Optional[str] + avatar: Optional[str] + heroes: Optional[List[StrippedHero]] + is_dm: bool + initial: bool + unstable_expanded_timeline: bool + # Should be empty for invite/knock rooms with `stripped_state` + required_state: List[EventBase] + # Should be empty for invite/knock rooms with `stripped_state` + timeline_events: List[EventBase] + bundled_aggregations: Optional[Dict[str, "BundledAggregations"]] + # Optional because it's only relevant to invite/knock rooms + stripped_state: List[JsonDict] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + prev_batch: Optional[StreamToken] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + limited: Optional[bool] + # Only optional because it won't be included for invite/knock rooms with `stripped_state` + num_live: Optional[int] + bump_stamp: Optional[int] + joined_count: Optional[int] + invited_count: Optional[int] + notification_count: int + highlight_count: int + + def __bool__(self) -> bool: + return ( + # If this is the first time the client is seeing the room, we should not filter it out + # under any circumstance. + self.initial + # We need to let the client know if any of the info has changed + or self.name is not None + or self.avatar is not None + or bool(self.heroes) + or self.joined_count is not None + or self.invited_count is not None + # We need to let the client know if there are any new events + or bool(self.required_state) + or bool(self.timeline_events) + or bool(self.stripped_state) + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class SlidingWindowList: + """ + Attributes: + count: The total number of entries in the list. Always present if this list + is. + ops: The sliding list operations to perform. + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Operation: + """ + Attributes: + op: The operation type to perform. + range: Which index positions are affected by this operation. These are + both inclusive. + room_ids: Which room IDs are affected by this operation. These IDs match + up to the positions in the `range`, so the last room ID in this list + matches the 9th index. The room data is held in a separate object. + """ + + op: OperationType + range: Tuple[int, int] + room_ids: List[str] + + count: int + ops: List[Operation] + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class Extensions: + """Responses for extensions + + Attributes: + to_device: The to-device extension (MSC3885) + e2ee: The E2EE device extension (MSC3884) + """ + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ToDeviceExtension: + """The to-device extension (MSC3885) + + Attributes: + next_batch: The to-device stream token the client should use + to get more results + events: A list of to-device messages for the client + """ + + next_batch: str + events: Sequence[JsonMapping] + + def __bool__(self) -> bool: + return bool(self.events) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class E2eeExtension: + """The E2EE device extension (MSC3884) + + Attributes: + device_list_updates: List of user_ids whose devices have changed or left (only + present on incremental syncs). + device_one_time_keys_count: Map from key algorithm to the number of + unclaimed one-time keys currently held on the server for this device. 
If + an algorithm is unlisted, the count for that algorithm is assumed to be + zero. If this entire parameter is missing, the count for all algorithms + is assumed to be zero. + device_unused_fallback_key_types: List of unused fallback key algorithms + for this device. + """ + + # Only present on incremental syncs + device_list_updates: Optional[DeviceListUpdates] + device_one_time_keys_count: Mapping[str, int] + device_unused_fallback_key_types: Sequence[str] + + def __bool__(self) -> bool: + # Note that "signed_curve25519" is always returned in key count responses + # regardless of whether we uploaded any keys for it. This is necessary until + # https://github.com/matrix-org/matrix-doc/issues/3298 is fixed. + # + # Also related: + # https://github.com/element-hq/element-android/issues/3725 and + # https://github.com/matrix-org/synapse/issues/10456 + default_otk = self.device_one_time_keys_count.get("signed_curve25519") + more_than_default_otk = len(self.device_one_time_keys_count) > 1 or ( + default_otk is not None and default_otk > 0 + ) + + return bool( + more_than_default_otk + or self.device_list_updates + or self.device_unused_fallback_key_types + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class AccountDataExtension: + """The Account Data extension (MSC3959) + + Attributes: + global_account_data_map: Mapping from `type` to `content` of global + account data events. + account_data_by_room_map: Mapping from room_id to mapping of `type` to + `content` of room account data events. + """ + + global_account_data_map: Mapping[str, JsonMapping] + account_data_by_room_map: Mapping[str, Mapping[str, JsonMapping]] + + def __bool__(self) -> bool: + return bool( + self.global_account_data_map or self.account_data_by_room_map + ) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ReceiptsExtension: + """The Receipts extension (MSC3960) + + Attributes: + room_id_to_receipt_map: Mapping from room_id to `m.receipt` ephemeral + event (type, content) + """ + + room_id_to_receipt_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_receipt_map) + + @attr.s(slots=True, frozen=True, auto_attribs=True) + class TypingExtension: + """The Typing Notification extension (MSC3961) + + Attributes: + room_id_to_typing_map: Mapping from room_id to `m.typing` ephemeral + event (type, content) + """ + + room_id_to_typing_map: Mapping[str, JsonMapping] + + def __bool__(self) -> bool: + return bool(self.room_id_to_typing_map) + + to_device: Optional[ToDeviceExtension] = None + e2ee: Optional[E2eeExtension] = None + account_data: Optional[AccountDataExtension] = None + receipts: Optional[ReceiptsExtension] = None + typing: Optional[TypingExtension] = None + + def __bool__(self) -> bool: + return bool( + self.to_device + or self.e2ee + or self.account_data + or self.receipts + or self.typing + ) + + next_pos: SlidingSyncStreamToken + lists: Mapping[str, SlidingWindowList] + rooms: Dict[str, RoomResult] + extensions: Extensions + + def __bool__(self) -> bool: + """Make the result appear empty if there are no updates. This is used + to tell if the notifier needs to wait for more events when polling for + events. + """ + # We don't include `self.lists` here, as a) `lists` is always non-empty even if + # there are no changes, and b) since we're sorting rooms by `stream_ordering` of + # the latest activity, anything that would cause the order to change would end + # up in `self.rooms` and cause us to send down the change. 
+        return bool(self.rooms or self.extensions)
+
+    @staticmethod
+    def empty(next_pos: SlidingSyncStreamToken) -> "SlidingSyncResult":
+        "Return a new empty result"
+        return SlidingSyncResult(
+            next_pos=next_pos,
+            lists={},
+            rooms={},
+            extensions=SlidingSyncResult.Extensions(),
+        )
+
+
+class StateValues:
+    """
+    Understood values of the (type, state_key) tuple in `required_state`.
+    """
+
+    # Include all state events of the given type
+    WILDCARD: Final = "*"
+    # Lazy-load room membership events (include room membership events for any event
+    # `sender` or membership change target in the timeline). We only give special
+    # meaning to this value when it's a `state_key`.
+    LAZY: Final = "$LAZY"
+    # Substitute with the requester's user ID. Typically used by clients to get
+    # the user's membership.
+    ME: Final = "$ME"
+
+
+# Note that this class is frozen: combining or de-duplicating configs returns a
+# new instance rather than updating one in place.
+@attr.s(slots=True, auto_attribs=True, frozen=True)
+class RoomSyncConfig:
+    """
+    Holds the config for what data we should fetch for a room in the sync response.
+
+    Attributes:
+        timeline_limit: The maximum number of events to return in the timeline.
+
+        required_state_map: Map from state event type to state_keys requested for the
+            room. The values are close to `StateKey` but actually use a syntax where you
+            can provide `*` wildcard and `$LAZY` for lazy-loading room members.
+    """
+
+    timeline_limit: int
+    required_state_map: Mapping[str, AbstractSet[str]]
+
+    @classmethod
+    def from_room_config(
+        cls,
+        room_params: SlidingSyncConfig.CommonRoomParameters,
+    ) -> "RoomSyncConfig":
+        """
+        Create a `RoomSyncConfig` from a `SlidingSyncList`/`RoomSubscription` config.
+
+        Args:
+            room_params: `SlidingSyncConfig.SlidingSyncList` or `SlidingSyncConfig.RoomSubscription`
+        """
+        required_state_map: Dict[str, Set[str]] = {}
+        for (
+            state_type,
+            state_key,
+        ) in room_params.required_state:
+            # If we already have a wildcard for this specific `state_key`, we don't need
+            # to add it since the wildcard already covers it.
+            if state_key in required_state_map.get(StateValues.WILDCARD, set()):
+                continue
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if state_type == StateValues.WILDCARD and state_key == StateValues.WILDCARD:
+                required_state_map = {StateValues.WILDCARD: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            # If we're getting a wildcard for the `state_type`, get rid of any other
+            # entries with the same `state_key`, since the wildcard will cover it already.
+            elif state_type == StateValues.WILDCARD:
+                # Get rid of any entries that match the `state_key`
+                #
+                # Make a copy so we don't run into an error: `dictionary changed size
+                # during iteration`, when we remove items
+                for (
+                    existing_state_type,
+                    existing_state_key_set,
+                ) in list(required_state_map.items()):
+                    # Make a copy so we don't run into an error: `Set changed size during
+                    # iteration`, when we filter out and remove items
+                    for existing_state_key in existing_state_key_set.copy():
+                        if existing_state_key == state_key:
+                            existing_state_key_set.remove(state_key)
+
+                    # If we've left the `set()` empty, remove it from the map
+                    if existing_state_key_set == set():
+                        required_state_map.pop(existing_state_type, None)
+
+            # If we're getting a wildcard `state_key`, get rid of any other state_keys
+            # for this `state_type` since the wildcard will cover it already.
+            if state_key == StateValues.WILDCARD:
+                required_state_map[state_type] = {state_key}
+            # Otherwise, just add it to the set
+            else:
+                if required_state_map.get(state_type) is None:
+                    required_state_map[state_type] = {state_key}
+                else:
+                    required_state_map[state_type].add(state_key)
+
+        return cls(
+            timeline_limit=room_params.timeline_limit,
+            required_state_map=required_state_map,
+        )
+
+    def combine_room_sync_config(
+        self, other_room_sync_config: "RoomSyncConfig"
+    ) -> "RoomSyncConfig":
+        """
+        Combine this `RoomSyncConfig` with another `RoomSyncConfig` and return the
+        superset union of the two.
+        """
+        timeline_limit = self.timeline_limit
+        required_state_map = {
+            event_type: set(state_keys)
+            for event_type, state_keys in self.required_state_map.items()
+        }
+
+        # Take the highest timeline limit
+        if timeline_limit < other_room_sync_config.timeline_limit:
+            timeline_limit = other_room_sync_config.timeline_limit
+
+        # Union the required state
+        for (
+            state_type,
+            state_key_set,
+        ) in other_room_sync_config.required_state_map.items():
+            # If we already have a wildcard for everything, we don't need to add
+            # anything else
+            if StateValues.WILDCARD in required_state_map.get(
+                StateValues.WILDCARD, set()
+            ):
+                break
+
+            # If we already have a wildcard `state_key` for this `state_type`, we don't need
+            # to add anything else
+            if StateValues.WILDCARD in required_state_map.get(state_type, set()):
+                continue
+
+            # If we're getting wildcards for the `state_type` and `state_key`, that's
+            # all that matters so get rid of any other entries
+            if (
+                state_type == StateValues.WILDCARD
+                and StateValues.WILDCARD in state_key_set
+            ):
+                required_state_map = {state_type: {StateValues.WILDCARD}}
+                # We can break, since we don't need to add anything else
+                break
+
+            for state_key in state_key_set:
+                # If we already have a wildcard for this specific `state_key`, we don't need
+                # to add it since the wildcard already covers it.
+                if state_key in required_state_map.get(StateValues.WILDCARD, set()):
+                    continue
+
+                # If we're getting a wildcard for the `state_type`, get rid of any other
+                # entries with the same `state_key`, since the wildcard will cover it already.
+                if state_type == StateValues.WILDCARD:
+                    # Get rid of any entries that match the `state_key`
+                    #
+                    # Make a copy so we don't run into an error: `dictionary changed size
+                    # during iteration`, when we remove items
+                    for existing_state_type, existing_state_key_set in list(
+                        required_state_map.items()
+                    ):
+                        # Make a copy so we don't run into an error: `Set changed size during
+                        # iteration`, when we filter out and remove items
+                        for existing_state_key in existing_state_key_set.copy():
+                            if existing_state_key == state_key:
+                                existing_state_key_set.remove(state_key)
+
+                        # If we've left the `set()` empty, remove it from the map
+                        if existing_state_key_set == set():
+                            required_state_map.pop(existing_state_type, None)
+
+                # If we're getting a wildcard `state_key`, get rid of any other state_keys
+                # for this `state_type` since the wildcard will cover it already.
+                if state_key == StateValues.WILDCARD:
+                    required_state_map[state_type] = {state_key}
+                    break
+                # Otherwise, just add it to the set
+                else:
+                    if required_state_map.get(state_type) is None:
+                        required_state_map[state_type] = {state_key}
+                    else:
+                        required_state_map[state_type].add(state_key)
+
+        return RoomSyncConfig(timeline_limit, required_state_map)
+
+    def must_await_full_state(
+        self,
+        is_mine_id: Callable[[str], bool],
+    ) -> bool:
+        """
+        Check whether we're only requesting `required_state` which is completely
+        satisfied even with partial state, in which case we don't need to
+        `await_full_state` before we can return it.
+
+        Also see `StateFilter.must_await_full_state(...)` for comparison.
+
+        Partially-stated rooms should have all state events except for remote membership
+        events so if we require a remote membership event anywhere, then we need to
+        return `True` (requires full state).
+
+        Args:
+            is_mine_id: a callable which confirms if a given state_key matches a mxid
+                of a local user
+        """
+        wildcard_state_keys = self.required_state_map.get(StateValues.WILDCARD)
+        # Requesting *all* state in the room so we have to wait
+        if (
+            wildcard_state_keys is not None
+            and StateValues.WILDCARD in wildcard_state_keys
+        ):
+            return True
+
+        # If the wildcards don't refer to remote user IDs, then we don't need to wait
+        # for full state.
+        if wildcard_state_keys is not None:
+            for possible_user_id in wildcard_state_keys:
+                if not possible_user_id[0].startswith(UserID.SIGIL):
+                    # Not a user ID
+                    continue
+
+                localpart_hostname = possible_user_id.split(":", 1)
+                if len(localpart_hostname) < 2:
+                    # Not a user ID
+                    continue
+
+                if not is_mine_id(possible_user_id):
+                    return True
+
+        membership_state_keys = self.required_state_map.get(EventTypes.Member)
+        # We aren't requesting any membership events at all so the partial state will
+        # cover us.
+        if membership_state_keys is None:
+            return False
+
+        # If we're requesting entirely local users, the partial state will cover us.
+        for user_id in membership_state_keys:
+            if user_id == StateValues.ME:
+                continue
+            # We're lazy-loading membership so we can just return the state we have.
+            # Lazy-loading means we include membership for any event `sender` or
+            # membership change target in the timeline but since we had to auth those
+            # timeline events, we will have the membership state for them (including
+            # from remote senders).
+            elif user_id == StateValues.LAZY:
+                continue
+            elif user_id == StateValues.WILDCARD:
+                return False
+            elif not is_mine_id(user_id):
+                return True
+
+        # Local users only so the partial state will cover us.
+ return False + + +class HaveSentRoomFlag(Enum): + """Flag for whether we have sent the room down a sliding sync connection. + + The valid state changes here are: + NEVER -> LIVE + LIVE -> PREVIOUSLY + PREVIOUSLY -> LIVE + """ + + # The room has never been sent down (or we have forgotten we have sent it + # down). + NEVER = "never" + + # We have previously sent the room down, but there are updates that we + # haven't sent down. + PREVIOUSLY = "previously" + + # We have sent the room down and the client has received all updates. + LIVE = "live" + + +T = TypeVar("T", str, RoomStreamToken, MultiWriterStreamToken, int) + + +@attr.s(auto_attribs=True, slots=True, frozen=True) +class HaveSentRoom(Generic[T]): + """Whether we have sent the room data down a sliding sync connection. + + We are generic over the type of token used, e.g. `RoomStreamToken` or + `MultiWriterStreamToken`. + + Attributes: + status: Flag of if we have or haven't sent down the room + last_token: If the flag is `PREVIOUSLY` then this is non-null and + contains the last stream token of the last updates we sent down + the room, i.e. we still need to send everything since then to the + client. + """ + + status: HaveSentRoomFlag + last_token: Optional[T] + + @staticmethod + def live() -> "HaveSentRoom[T]": + return HaveSentRoom(HaveSentRoomFlag.LIVE, None) + + @staticmethod + def previously(last_token: T) -> "HaveSentRoom[T]": + """Constructor for `PREVIOUSLY` flag.""" + return HaveSentRoom(HaveSentRoomFlag.PREVIOUSLY, last_token) + + @staticmethod + def never() -> "HaveSentRoom[T]": + # We use a singleton to avoid repeatedly instantiating new `never` + # values. + return _HAVE_SENT_ROOM_NEVER + + +_HAVE_SENT_ROOM_NEVER: HaveSentRoom[Any] = HaveSentRoom(HaveSentRoomFlag.NEVER, None) + + +@attr.s(auto_attribs=True, slots=True, frozen=True) +class RoomStatusMap(Generic[T]): + """For a given stream, e.g. events, records what we have or have not sent + down for that stream in a given room.""" + + # `room_id` -> `HaveSentRoom` + _statuses: Mapping[str, HaveSentRoom[T]] = attr.Factory(dict) + + def have_sent_room(self, room_id: str) -> HaveSentRoom[T]: + """Return whether we have previously sent the room down""" + return self._statuses.get(room_id, HaveSentRoom.never()) + + def get_mutable(self) -> "MutableRoomStatusMap[T]": + """Get a mutable copy of this state.""" + return MutableRoomStatusMap( + statuses=self._statuses, + ) + + def copy(self) -> "RoomStatusMap[T]": + """Make a copy of the class. Useful for converting from a mutable to + immutable version.""" + + return RoomStatusMap(statuses=dict(self._statuses)) + + def __len__(self) -> int: + return len(self._statuses) + + +class MutableRoomStatusMap(RoomStatusMap[T]): + """A mutable version of `RoomStatusMap`""" + + # We use a ChainMap here so that we can easily track what has been updated + # and what hasn't. Note that when we persist the per connection state this + # will get flattened to a normal dict (via calling `.copy()`) + _statuses: typing.ChainMap[str, HaveSentRoom[T]] + + def __init__( + self, + statuses: Mapping[str, HaveSentRoom[T]], + ) -> None: + # ChainMap requires a mutable mapping, but we're not actually going to + # mutate it. 
+ statuses = cast(MutableMapping, statuses) + + super().__init__( + statuses=ChainMap({}, statuses), + ) + + def get_updates(self) -> Mapping[str, HaveSentRoom[T]]: + """Return only the changes that were made""" + return self._statuses.maps[0] + + def record_sent_rooms(self, room_ids: StrCollection) -> None: + """Record that we have sent these rooms in the response""" + for room_id in room_ids: + current_status = self._statuses.get(room_id, HaveSentRoom.never()) + if current_status.status == HaveSentRoomFlag.LIVE: + continue + + self._statuses[room_id] = HaveSentRoom.live() + + def record_unsent_rooms(self, room_ids: StrCollection, from_token: T) -> None: + """Record that we have not sent these rooms in the response, but there + have been updates. + """ + # Whether we add/update the entries for unsent rooms depends on the + # existing entry: + # - LIVE: We have previously sent down everything up to + # `last_room_token, so we update the entry to be `PREVIOUSLY` with + # `last_room_token`. + # - PREVIOUSLY: We have previously sent down everything up to *a* + # given token, so we don't need to update the entry. + # - NEVER: We have never previously sent down the room, and we haven't + # sent anything down this time either so we leave it as NEVER. + + for room_id in room_ids: + current_status = self._statuses.get(room_id, HaveSentRoom.never()) + if current_status.status != HaveSentRoomFlag.LIVE: + continue + + self._statuses[room_id] = HaveSentRoom.previously(from_token) + + +@attr.s(auto_attribs=True, frozen=True) +class PerConnectionState: + """The per-connection state. A snapshot of what we've sent down the + connection before. + + Currently, we track whether we've sent down various aspects of a given room + before. + + We use the `rooms` field to store the position in the events stream for each + room that we've previously sent to the client before. On the next request + that includes the room, we can then send only what's changed since that + recorded position. + + Same goes for the `receipts` field so we only need to send the new receipts + since the last time you made a sync request. + + Attributes: + rooms: The status of each room for the events stream. + receipts: The status of each room for the receipts stream. + room_configs: Map from room_id to the `RoomSyncConfig` of all + rooms that we have previously sent down. 
+ """ + + rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) + receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) + account_data: RoomStatusMap[int] = attr.Factory(RoomStatusMap) + + room_configs: Mapping[str, RoomSyncConfig] = attr.Factory(dict) + + def get_mutable(self) -> "MutablePerConnectionState": + """Get a mutable copy of this state.""" + room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs) + + return MutablePerConnectionState( + rooms=self.rooms.get_mutable(), + receipts=self.receipts.get_mutable(), + account_data=self.account_data.get_mutable(), + room_configs=ChainMap({}, room_configs), + ) + + def copy(self) -> "PerConnectionState": + return PerConnectionState( + rooms=self.rooms.copy(), + receipts=self.receipts.copy(), + account_data=self.account_data.copy(), + room_configs=dict(self.room_configs), + ) + + def __len__(self) -> int: + return len(self.rooms) + len(self.receipts) + len(self.room_configs) + + +@attr.s(auto_attribs=True) +class MutablePerConnectionState(PerConnectionState): + """A mutable version of `PerConnectionState`""" + + rooms: MutableRoomStatusMap[RoomStreamToken] + receipts: MutableRoomStatusMap[MultiWriterStreamToken] + account_data: MutableRoomStatusMap[int] + + room_configs: typing.ChainMap[str, RoomSyncConfig] + + def has_updates(self) -> bool: + return ( + bool(self.rooms.get_updates()) + or bool(self.receipts.get_updates()) + or bool(self.account_data.get_updates()) + or bool(self.get_room_config_updates()) + ) + + def get_room_config_updates(self) -> Mapping[str, RoomSyncConfig]: + """Get updates to the room sync config""" + return self.room_configs.maps[0] diff --git a/synapse/types/rest/__init__.py b/synapse/types/rest/__init__.py
index 2b6f5ed35a..183831e79a 100644 --- a/synapse/types/rest/__init__.py +++ b/synapse/types/rest/__init__.py
@@ -18,14 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING - -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, Extra -else: - from pydantic import BaseModel, Extra +from synapse._pydantic_compat import BaseModel, Extra class RequestBodyModel(BaseModel): diff --git a/synapse/types/rest/client/__init__.py b/synapse/types/rest/client/__init__.py
index 93b537ab7b..2d386bfe53 100644 --- a/synapse/types/rest/client/__init__.py +++ b/synapse/types/rest/client/__init__.py
@@ -20,31 +20,16 @@ # from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union -from synapse._pydantic_compat import HAS_PYDANTIC_V2 - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import ( - Extra, - StrictBool, - StrictInt, - StrictStr, - conint, - constr, - validator, - ) -else: - from pydantic import ( - Extra, - StrictBool, - StrictInt, - StrictStr, - conint, - constr, - validator, - ) - +from synapse._pydantic_compat import ( + Extra, + StrictBool, + StrictInt, + StrictStr, + conint, + constr, + validator, +) from synapse.types.rest import RequestBodyModel -from synapse.util.threepids import validate_email class AuthenticationData(RequestBodyModel): @@ -76,33 +61,6 @@ else: ) -class ThreepidRequestTokenBody(RequestBodyModel): - client_secret: ClientSecretStr - id_server: Optional[StrictStr] - id_access_token: Optional[StrictStr] - next_link: Optional[StrictStr] - send_attempt: StrictInt - - @validator("id_access_token", always=True) - def token_required_for_identity_server( - cls, token: Optional[str], values: Dict[str, object] - ) -> Optional[str]: - if values.get("id_server") is not None and token is None: - raise ValueError("id_access_token is required if an id_server is supplied.") - return token - - -class EmailRequestTokenBody(ThreepidRequestTokenBody): - email: StrictStr - - # Canonicalise the email address. The addresses are all stored canonicalised - # in the database. This allows the user to reset his password without having to - # know the exact spelling (eg. upper and lower case) of address in the database. - # Without this, an email stored in the database as "foo@bar.com" would cause - # user requests for "FOO@bar.com" to raise a Not Found error. - _email_validator = validator("email", allow_reuse=True)(validate_email) - - if TYPE_CHECKING: ISO3116_1_Alpha_2 = StrictStr else: @@ -110,11 +68,6 @@ else: ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True) -class MsisdnRequestTokenBody(ThreepidRequestTokenBody): - country: ISO3116_1_Alpha_2 - phone_number: StrictStr - - class SlidingSyncBody(RequestBodyModel): """ Sliding Sync API request body. @@ -268,7 +221,9 @@ class SlidingSyncBody(RequestBodyModel): if TYPE_CHECKING: ranges: Optional[List[Tuple[int, int]]] = None else: - ranges: Optional[List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]]] = None # type: ignore[valid-type] + ranges: Optional[ + List[Tuple[conint(ge=0, strict=True), conint(ge=0, strict=True)]] + ] = None # type: ignore[valid-type] slow_get_all_rooms: Optional[StrictBool] = False filters: Optional[Filters] = None @@ -382,13 +337,15 @@ class SlidingSyncBody(RequestBodyModel): receipts: Optional[ReceiptsExtension] = None typing: Optional[TypingExtension] = None - conn_id: Optional[str] + conn_id: Optional[StrictStr] # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884 if TYPE_CHECKING: lists: Optional[Dict[str, SlidingSyncList]] = None else: - lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = None # type: ignore[valid-type] + lists: Optional[Dict[constr(max_length=64, strict=True), SlidingSyncList]] = ( + None # type: ignore[valid-type] + ) room_subscriptions: Optional[Dict[StrictStr, RoomSubscription]] = None extensions: Optional[Extensions] = None diff --git a/synapse/types/state.py b/synapse/types/state.py
index c958a95701..6420e050a5 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py
@@ -68,15 +68,23 @@ class StateFilter: include_others: bool = False def __attrs_post_init__(self) -> None: - # If `include_others` is set we canonicalise the filter by removing - # wildcards from the types dictionary if self.include_others: + # If `include_others` is set we canonicalise the filter by removing + # wildcards from the types dictionary + # this is needed to work around the fact that StateFilter is frozen object.__setattr__( self, "types", immutabledict({k: v for k, v in self.types.items() if v is not None}), ) + else: + # Otherwise we remove entries where the value is the empty set. + object.__setattr__( + self, + "types", + immutabledict({k: v for k, v in self.types.items() if v is None or v}), + ) @staticmethod def all() -> "StateFilter": @@ -454,7 +462,7 @@ class StateFilter: new_types.update({state_type: set() for state_type in minus_wildcards}) # insert the plus wildcards - new_types.update({state_type: None for state_type in plus_wildcards}) + new_types.update(dict.fromkeys(plus_wildcards)) # insert the specific state keys for state_type, state_key in plus_state_keys: @@ -503,13 +511,19 @@ class StateFilter: # - if so, which event types are excluded? ('excludes') # - which entire event types to include ('wildcards') # - which concrete state keys to include ('concrete state keys') - (self_all, self_excludes), ( - self_wildcards, - self_concrete_keys, + ( + (self_all, self_excludes), + ( + self_wildcards, + self_concrete_keys, + ), ) = self._decompose_into_four_parts() - (other_all, other_excludes), ( - other_wildcards, - other_concrete_keys, + ( + (other_all, other_excludes), + ( + other_wildcards, + other_concrete_keys, + ), ) = other._decompose_into_four_parts() # Start with an estimate of the difference based on self @@ -610,6 +624,13 @@ class StateFilter: return False + def __bool__(self) -> bool: + """Returns true if this state filter will match any state, or false if + this is the empty filter""" + if self.include_others: + return True + return bool(self.types) + _ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True) _ALL_NON_MEMBER_STATE_FILTER = StateFilter( diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py new file mode 100644
index 0000000000..378a15e038 --- /dev/null +++ b/synapse/types/storage/__init__.py
@@ -0,0 +1,56 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# Originally licensed under the Apache License, Version 2.0: +# <http://www.apache.org/licenses/LICENSE-2.0>. +# +# [This file includes modifications made by New Vector Limited] +# +# + + +class _BackgroundUpdates: + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2" + INDEX_STREAM_ORDERING2 = "index_stream_ordering2" + INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url" + INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order" + INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream" + INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts" + REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column" + + EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" + EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" + + EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" + + EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" + + SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE = ( + "sliding_sync_prefill_joined_rooms_to_recalculate_table_bg_update" + ) + SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE = "sliding_sync_joined_rooms_bg_update" + SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = ( + "sliding_sync_membership_snapshots_bg_update" + ) + SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE = ( + "sliding_sync_membership_snapshots_fix_forgotten_column_bg_update" + ) + + MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE = ( + "mark_unreferenced_state_groups_for_deletion_bg_update" + ) + + FIXUP_MAX_DEPTH_CAP = "fixup_max_depth_cap" diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 70139beef2..e596e1ed20 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py
@@ -41,6 +41,7 @@ from typing import ( Hashable, Iterable, List, + Literal, Optional, Set, Tuple, @@ -51,7 +52,7 @@ from typing import ( ) import attr -from typing_extensions import Concatenate, Literal, ParamSpec +from typing_extensions import Concatenate, ParamSpec, Unpack from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -61,6 +62,7 @@ from twisted.python.failure import Failure from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, + run_coroutine_in_background, run_in_background, ) from synapse.util import Clock @@ -344,6 +346,7 @@ T1 = TypeVar("T1") T2 = TypeVar("T2") T3 = TypeVar("T3") T4 = TypeVar("T4") +T5 = TypeVar("T5") @overload @@ -402,6 +405,112 @@ def gather_results( # type: ignore[misc] return deferred.addCallback(tuple) +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]]]], +) -> Tuple[Optional[T1]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + Optional[Coroutine[Any, Any, T3]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2], Optional[T3]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + Optional[Coroutine[Any, Any, T3]], + Optional[Coroutine[Any, Any, T4]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + Optional[Coroutine[Any, Any, T3]], + Optional[Coroutine[Any, Any, T4]], + Optional[Coroutine[Any, Any, T5]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5]]: ... + + +async def gather_optional_coroutines( + *coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]], ...]], +) -> Tuple[Optional[T1], ...]: + """Helper function that allows waiting on multiple coroutines at once. + + The return value is a tuple of the return values of the coroutines in order. + + If a `None` is passed instead of a coroutine, it will be ignored and a None + is returned in the tuple. + + Note: For typechecking we need to have an explicit overload for each + distinct number of coroutines passed in. If you see type problems, it's + likely because you're using many arguments and you need to add a new + overload above. + """ + + try: + results = await make_deferred_yieldable( + defer.gatherResults( + [ + run_coroutine_in_background(coroutine) + for coroutine in coroutines + if coroutine is not None + ], + consumeErrors=True, + ) + ) + + results_iter = iter(results) + return tuple( + next(results_iter) if coroutine is not None else None + for coroutine in coroutines + ) + except defer.FirstError as dfe: + # unwrap the error from defer.gatherResults. + + # The raised exception's traceback only includes func() etc if + # the 'await' happens before the exception is thrown - ie if the failure + # happens *asynchronously* - otherwise Twisted throws away the traceback as it + # could be large. + # + # We could maybe reconstruct a fake traceback from Failure.frames. 
Or maybe + # we could throw Twisted into the fires of Mordor. + + # suppress exception chaining, because the FirstError doesn't tell us anything + # very interesting. + assert isinstance(dfe.subFailure.value, BaseException) + raise dfe.subFailure.value from None + + @attr.s(slots=True, auto_attribs=True) class _LinearizerEntry: # The number of things executing. @@ -885,3 +994,46 @@ class AwakenableSleeper: # Cancel the sleep if we were woken up if call.active(): call.cancel() + + +class DeferredEvent: + """Like threading.Event but for async code""" + + def __init__(self, reactor: IReactorTime) -> None: + self._reactor = reactor + self._deferred: "defer.Deferred[None]" = defer.Deferred() + + def set(self) -> None: + if not self._deferred.called: + self._deferred.callback(None) + + def clear(self) -> None: + if self._deferred.called: + self._deferred = defer.Deferred() + + def is_set(self) -> bool: + return self._deferred.called + + async def wait(self, timeout_seconds: float) -> bool: + if self.is_set(): + return True + + # Create a deferred that gets called in N seconds + sleep_deferred: "defer.Deferred[None]" = defer.Deferred() + call = self._reactor.callLater(timeout_seconds, sleep_deferred.callback, None) + + try: + await make_deferred_yieldable( + defer.DeferredList( + [sleep_deferred, self._deferred], + fireOnOneCallback=True, + fireOnOneErrback=True, + consumeErrors=True, + ) + ) + finally: + # Cancel the sleep if we were woken up + if call.active(): + call.cancel() + + return self.is_set() diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
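A usage sketch for the two additions above, `gather_optional_coroutines` and `DeferredEvent`; the fetchers here are illustrative stand-ins:

async def fetch_count() -> int:
    return 1

async def fetch_name() -> str:
    return "two"

async def example(include_name: bool) -> None:
    # A None placeholder is skipped, and None comes back in its slot.
    count, name = await gather_optional_coroutines(
        fetch_count(),
        fetch_name() if include_name else None,
    )
    # count: Optional[int], name: Optional[str]

async def wait_for_wakeup(event: DeferredEvent) -> None:
    # DeferredEvent mirrors threading.Event: set()/clear()/is_set(), plus an
    # awaitable wait() that reports whether the event fired within the timeout.
    woken = await event.wait(timeout_seconds=30)
    if not woken:
        pass  # timed out; caller decides what to do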
index 1e6696332f..14bd3ba3b0 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py
@@ -21,10 +21,19 @@ import enum import logging import threading -from typing import Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union +from typing import ( + Dict, + Generic, + Iterable, + Literal, + Optional, + Set, + Tuple, + TypeVar, + Union, +) import attr -from typing_extensions import Literal from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index 8017c031ee..3198fdd2ed 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py
@@ -21,10 +21,9 @@ import logging from collections import OrderedDict -from typing import Any, Generic, Iterable, Optional, TypeVar, Union, overload +from typing import Any, Generic, Iterable, Literal, Optional, TypeVar, Union, overload import attr -from typing_extensions import Literal from twisted.internet import defer diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py
index 481a1a621e..2e5efa3a52 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py
@@ -34,6 +34,7 @@ from typing import ( Generic, Iterable, List, + Literal, Optional, Set, Tuple, @@ -44,8 +45,6 @@ from typing import ( overload, ) -from typing_extensions import Literal - from twisted.internet import reactor from twisted.internet.interfaces import IReactorTime diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 96b7ca83dc..54b99134b9 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py
@@ -101,7 +101,13 @@ class ResponseCache(Generic[KV]): used rather than trying to compute a new response. """ - def __init__(self, clock: Clock, name: str, timeout_ms: float = 0): + def __init__( + self, + clock: Clock, + name: str, + timeout_ms: float = 0, + enable_logging: bool = True, + ): self._result_cache: Dict[KV, ResponseCacheEntry] = {} self.clock = clock @@ -109,6 +115,7 @@ class ResponseCache(Generic[KV]): self._name = name self._metrics = register_cache("response_cache", name, self, resizable=False) + self._enable_logging = enable_logging def size(self) -> int: return len(self._result_cache) @@ -246,9 +253,12 @@ class ResponseCache(Generic[KV]): """ entry = self._get(key) if not entry: - logger.debug( - "[%s]: no cached result for [%s], calculating new one", self._name, key - ) + if self._enable_logging: + logger.debug( + "[%s]: no cached result for [%s], calculating new one", + self._name, + key, + ) context = ResponseCacheContext(cache_key=key) if cache_context: kwargs["cache_context"] = context @@ -269,12 +279,15 @@ class ResponseCache(Generic[KV]): return await make_deferred_yieldable(entry.result.observe()) result = entry.result.observe() - if result.called: - logger.info("[%s]: using completed cached result for [%s]", self._name, key) - else: - logger.info( - "[%s]: using incomplete cached result for [%s]", self._name, key - ) + if self._enable_logging: + if result.called: + logger.info( + "[%s]: using completed cached result for [%s]", self._name, key + ) + else: + logger.info( + "[%s]: using incomplete cached result for [%s]", self._name, key + ) span_context = entry.opentracing_span_context with start_active_span_follows_from( diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 16fcb00206..5ac8643eef 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py
@@ -142,9 +142,9 @@ class StreamChangeCache: """ assert isinstance(stream_pos, int) - # _cache is not valid at or before the earliest known stream position, so + # _cache is not valid before the earliest known stream position, so # return that the entity has changed. - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() return True @@ -186,7 +186,7 @@ class StreamChangeCache: This will be all entities if the given stream position is at or earlier than the earliest known stream position. """ - if not self._cache or stream_pos <= self._earliest_known_stream_pos: + if not self._cache or stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() return set(entities) @@ -238,9 +238,9 @@ class StreamChangeCache: """ assert isinstance(stream_pos, int) - # _cache is not valid at or before the earliest known stream position, so + # _cache is not valid before the earliest known stream position, so # return that an entity has changed. - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: self.metrics.inc_misses() return True @@ -270,9 +270,9 @@ class StreamChangeCache: """ assert isinstance(stream_pos, int) - # _cache is not valid at or before the earliest known stream position, so + # _cache is not valid before the earliest known stream position, so # return None to mark that it is unknown if an entity has changed. - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: return AllEntitiesChangedResult(None) changed_entities: List[EntityType] = [] @@ -314,6 +314,15 @@ class StreamChangeCache: self._entity_to_key[entity] = stream_pos self._evict() + def all_entities_changed(self, stream_pos: int) -> None: + """ + Mark all entities as changed. This is useful when the cache is invalidated and + there may be some potential change for all of the entities. + """ + self._cache.clear() + self._entity_to_key.clear() + self._earliest_known_stream_pos = stream_pos + def _evict(self) -> None: """ Ensure the cache has not exceeded the maximum size. diff --git a/synapse/util/events.py b/synapse/util/events.py new file mode 100644
index 0000000000..ad9b946578 --- /dev/null +++ b/synapse/util/events.py
@@ -0,0 +1,29 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# + +from synapse.util.stringutils import random_string + + +def generate_fake_event_id() -> str: + """ + Generate an event ID from random ASCII characters. + + This is primarily useful for generating fake event IDs in response to + requests from shadow-banned users. + + Returns: + A string intended to look like an event ID, but with no actual meaning. + """ + return "$" + random_string(43) diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py
index b73f690b88..0a6a30aab2 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py
@@ -30,14 +30,13 @@ from typing import ( Iterator, List, Mapping, + Protocol, Set, Sized, Tuple, TypeVar, ) -from typing_extensions import Protocol - T = TypeVar("T") S = TypeVar("S", bound="_SelfSlice") @@ -115,7 +114,7 @@ def sorted_topologically( # This is implemented by Kahn's algorithm. - degree_map = {node: 0 for node in nodes} + degree_map = dict.fromkeys(nodes, 0) reverse_graph: Dict[T, Set[T]] = {} for node, edges in graph.items(): @@ -165,7 +164,7 @@ def sorted_topologically_batched( persisted. """ - degree_map = {node: 0 for node in nodes} + degree_map = dict.fromkeys(nodes, 0) reverse_graph: Dict[T, Set[T]] = {} for node, edges in graph.items(): diff --git a/synapse/util/linked_list.py b/synapse/util/linked_list.py
index e9a5fff211..87f801c0cf 100644 --- a/synapse/util/linked_list.py +++ b/synapse/util/linked_list.py
@@ -19,8 +19,7 @@ # # -"""A circular doubly linked list implementation. -""" +"""A circular doubly linked list implementation.""" import threading from typing import Generic, Optional, Type, TypeVar diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py
index 84ae226207..6fa15543ec 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py
@@ -22,12 +22,11 @@ """Utilities for manipulating macaroons""" -from typing import Callable, Optional +from typing import Callable, Literal, Optional import attr import pymacaroons from pymacaroons.exceptions import MacaroonVerificationFailedException -from typing_extensions import Literal from synapse.util import Clock, stringutils diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 517e79ce5f..6a389f7a7e 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py
@@ -22,10 +22,19 @@ import logging from functools import wraps from types import TracebackType -from typing import Awaitable, Callable, Dict, Generator, Optional, Type, TypeVar +from typing import ( + Awaitable, + Callable, + Dict, + Generator, + Optional, + Protocol, + Type, + TypeVar, +) from prometheus_client import CollectorRegistry, Counter, Metric -from typing_extensions import Concatenate, ParamSpec, Protocol +from typing_extensions import Concatenate, ParamSpec from synapse.logging.context import ( ContextResourceUsage, @@ -110,7 +119,7 @@ def measure_func( """ def wrapper( - func: Callable[Concatenate[HasClock, P], Awaitable[R]] + func: Callable[Concatenate[HasClock, P], Awaitable[R]], ) -> Callable[P, Awaitable[R]]: block_name = func.__name__ if name is None else name diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py deleted file mode 100644
index b6a784f0bc..0000000000 --- a/synapse/util/msisdn.py +++ /dev/null
@@ -1,51 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2017 Vector Creations Ltd -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import phonenumbers - -from synapse.api.errors import SynapseError - - -def phone_number_to_msisdn(country: str, number: str) -> str: - """ - Takes an ISO-3166-1 2 letter country code and phone number and - returns an msisdn representing the canonical version of that - phone number. - - As an example, if `country` is "GB" and `number` is "7470674927", this - function will return "447470674927". - - Args: - country: ISO-3166-1 2 letter country code - number: Phone number in a national or international format - - Returns: - The canonical form of the phone number, as an msisdn. - Raises: - SynapseError if the number could not be parsed. - """ - try: - phoneNumber = phonenumbers.parse(number, country) - except phonenumbers.NumberParseException: - raise SynapseError(400, "Unable to parse phone number") - return phonenumbers.format_number(phoneNumber, phonenumbers.PhoneNumberFormat.E164)[ - 1: - ] diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py
index 46dad32156..beea4d2888 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py
@@ -50,7 +50,7 @@ def do_patch() -> None: return def new_inline_callbacks( - f: Callable[P, Generator["Deferred[object]", object, T]] + f: Callable[P, Generator["Deferred[object]", object, T]], ) -> Callable[P, "Deferred[T]"]: @functools.wraps(f) def wrapped(*args: P.args, **kwargs: P.kwargs) -> "Deferred[T]": @@ -162,7 +162,7 @@ def _check_yield_points( d = result.throwExceptionIntoGenerator(gen) else: d = gen.send(result) - except (StopIteration, defer._DefGen_Return) as e: + except StopIteration as e: if current_context() != expected_context: # This happens when the context is lost sometime *after* the # final yield and returning. E.g. we forgot to yield on a @@ -183,7 +183,7 @@ def _check_yield_points( ) ) changes.append(err) - # The `StopIteration` or `_DefGen_Return` contains the return value from the + # The `StopIteration` contains the return value from the # generator. return cast(T, e.value) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py
index 8ead72bb7a..3f067b792c 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py
@@ -103,7 +103,7 @@ _rate_limiter_instances_lock = threading.Lock() def _get_counts_from_rate_limiter_instance( - count_func: Callable[["FederationRateLimiter"], int] + count_func: Callable[["FederationRateLimiter"], int], ) -> Mapping[Tuple[str, ...], int]: """Returns a count of something (slept/rejected hosts) by (metrics_name)""" # Cast to a list to prevent it changing while the Prometheus diff --git a/synapse/util/rust.py b/synapse/util/rust.py
index 0e35d6d188..37f43459f1 100644 --- a/synapse/util/rust.py +++ b/synapse/util/rust.py
@@ -19,9 +19,12 @@ # # +import json import os -import sys +import urllib.parse from hashlib import blake2b +from importlib.metadata import Distribution, PackageNotFoundError +from typing import Optional import synapse from synapse.synapse_rust import get_rust_file_digest @@ -32,22 +35,17 @@ def check_rust_lib_up_to_date() -> None: be rebuilt. """ - if not _dist_is_editable(): - return - - synapse_dir = os.path.dirname(synapse.__file__) - synapse_root = os.path.abspath(os.path.join(synapse_dir, "..")) - - # Double check we've not gone into site-packages... - if os.path.basename(synapse_root) == "site-packages": - return - - # ... and it looks like the root of a python project. - if not os.path.exists("pyproject.toml"): - return + # Get the location of the editable install. + synapse_root = get_synapse_source_directory() + if synapse_root is None: + return None # Get the hash of all Rust source files - hash = _hash_rust_files_in_directory(os.path.join(synapse_root, "rust", "src")) + rust_path = os.path.join(synapse_root, "rust", "src") + if not os.path.exists(rust_path): + return None + + hash = _hash_rust_files_in_directory(rust_path) if hash != get_rust_file_digest(): raise Exception("Rust module outdated. Please rebuild using `poetry install`") @@ -82,10 +80,55 @@ def _hash_rust_files_in_directory(directory: str) -> str: return hasher.hexdigest() -def _dist_is_editable() -> bool: - """Is distribution an editable install?""" - for path_item in sys.path: - egg_link = os.path.join(path_item, "matrix-synapse.egg-link") - if os.path.isfile(egg_link): - return True - return False +def get_synapse_source_directory() -> Optional[str]: + """Try and find the source directory of synapse for editable installs (like + those used in development). + + Returns None if not an editable install (or otherwise can't find the source + directory). + """ + + # Try and find the installed matrix-synapse package. + try: + package = Distribution.from_name("matrix-synapse") + except PackageNotFoundError: + # The package is not found, so it's not installed and so must be being + # pulled out from a local directory (usually the current one). + synapse_dir = os.path.dirname(synapse.__file__) + synapse_root = os.path.abspath(os.path.join(synapse_dir, "..")) + + # Double check we've not gone into site-packages... + if os.path.basename(synapse_root) == "site-packages": + return None + + # ... and it looks like the root of a python project. + if not os.path.exists("pyproject.toml"): + return None + + return synapse_root + + # Read the `direct_url.json` metadata for the package. This won't exist for + # packages installed via a repository/etc. + # c.f. https://packaging.python.org/en/latest/specifications/direct-url/ + direct_url_json = package.read_text("direct_url.json") + if direct_url_json is None: + return None + + # c.f. https://packaging.python.org/en/latest/specifications/direct-url/ for + # the format + direct_url_dict: dict = json.loads(direct_url_json) + + # `url` must exist as a key, and point to where we fetched the repo from. + project_url = urllib.parse.urlparse(direct_url_dict["url"]) + + # If it's not a local file then we must have built the rust libs either a) + # after we downloaded the package, or b) we built the downloaded wheel. + if project_url.scheme != "file": + return None + + # And finally, if it's not an editable install then the files can't have
+ if not direct_url_dict.get("dir_info", {}).get("editable", False): + return None + + return project_url.path diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
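The rewritten detection follows the direct-URL spec (PEP 610): an editable install records a `file://` URL plus `"dir_info": {"editable": true}` in `direct_url.json`. An illustrative payload and the checks the function applies to it:

import urllib.parse

# Example metadata for a `pip install -e .` style install (illustrative):
direct_url = {
    "url": "file:///home/dev/synapse",
    "dir_info": {"editable": True},
}

parsed = urllib.parse.urlparse(direct_url["url"])
assert parsed.scheme == "file"                                # local checkout
assert direct_url.get("dir_info", {}).get("editable", False)  # editable install
assert parsed.path == "/home/dev/synapse"  # what get_synapse_source_directory returns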
index 13ff54b669..32b5bc00c9 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py
@@ -43,6 +43,14 @@ CLIENT_SECRET_REGEX = re.compile(r"^[0-9a-zA-Z\.=_\-]+$") # MXC_REGEX = re.compile("^mxc://([^/]+)/([^/#?]+)$") +# https://spec.matrix.org/v1.13/appendices/#common-namespaced-identifier-grammar +# +# At least one character, no more than 255 characters. Must start with +# a-z; the rest is a-z, 0-9, -, _, or `.`. +# +# This doesn't check anything about the validity of namespaces. +NAMESPACED_GRAMMAR = re.compile(r"^[a-z][a-z0-9_.-]{0,254}$") + def random_string(length: int) -> str: """Generate a cryptographically secure string of random letters. @@ -68,6 +76,10 @@ def is_ascii(s: bytes) -> bool: return True +def is_namedspaced_grammar(s: str) -> bool: + return bool(NAMESPACED_GRAMMAR.match(s)) + + def assert_valid_client_secret(client_secret: str) -> None: """Validate that a given string matches the client_secret defined by the spec""" if ( diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
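Some concrete accept/reject cases for the new grammar; the `{0,254}` quantifier plus the mandatory leading character encodes the spec's 255-character cap:

import re

NAMESPACED_GRAMMAR = re.compile(r"^[a-z][a-z0-9_.-]{0,254}$")

assert NAMESPACED_GRAMMAR.match("m.push_rules")
assert NAMESPACED_GRAMMAR.match("org.example.custom-rule")
assert NAMESPACED_GRAMMAR.match("a" * 255)        # at the cap
assert not NAMESPACED_GRAMMAR.match("a" * 256)    # over the cap
assert not NAMESPACED_GRAMMAR.match("1numeric")   # must start with a-z
assert not NAMESPACED_GRAMMAR.match("M.Upper")    # uppercase not allowed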
index 448960b297..4683d09cd7 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py
@@ -46,33 +46,43 @@ logger = logging.getLogger(__name__) class TaskScheduler: """ - This is a simple task sheduler aimed at resumable tasks: usually we use `run_in_background` - to launch a background task, or Twisted `deferLater` if we want to do so later on. - - The problem with that is that the tasks will just stop and never be resumed if synapse - is stopped for whatever reason. - - How this works: - - A function mapped to a named action should first be registered with `register_action`. - This function will be called when trying to resuming tasks after a synapse shutdown, - so this registration should happen when synapse is initialised, NOT right before scheduling - a task. - - A task can then be launched using this named action with `schedule_task`. A `params` dict - can be passed, and it will be available to the registered function when launched. This task - can be launch either now-ish, or later on by giving a `timestamp` parameter. - - The function may call `update_task` at any time to update the `result` of the task, - and this can be used to resume the task at a specific point and/or to convey a result to - the code launching the task. - You can also specify the `result` (and/or an `error`) when returning from the function. - - The reconciliation loop runs every minute, so this is not a precise scheduler. - There is a limit of 10 concurrent tasks, so tasks may be delayed if the pool is already - full. In this regard, please take great care that scheduled tasks can actually finished. - For now there is no mechanism to stop a running task if it is stuck. - - Tasks will be run on the worker specified with `run_background_tasks_on` config, - or the main one by default. + This is a simple task scheduler designed for resumable tasks. Normally, + you'd use `run_in_background` to start a background task or Twisted's + `deferLater` if you want to run it later. + + The issue is that these tasks stop completely and won't resume if Synapse is + shut down for any reason. + + Here's how it works: + + - Register an Action: First, you need to register a function to a named + action using `register_action`. This function will be called to resume tasks + after a Synapse shutdown. Make sure to register it when Synapse initializes, + not right before scheduling the task. + + - Schedule a Task: You can launch a task linked to the named action + using `schedule_task`. You can pass a `params` dictionary, which will be + passed to the registered function when it's executed. Tasks can be scheduled + to run either immediately or later by specifying a `timestamp`. + + - Update Task: The function handling the task can call `update_task` at + any point to update the task's `result`. This lets you resume the task from + a specific point or pass results back to the code that scheduled it. When + the function completes, you can also return a `result` or an `error`. + + Things to keep in mind: + + - The reconciliation loop runs every minute, so this is not a high-precision + scheduler. + + - Only 10 tasks can run at the same time. If the pool is full, tasks may be + delayed. Make sure your scheduled tasks can actually finish. + + - Currently, there's no way to stop a task if it gets stuck. + + - Tasks will run on the worker defined by the `run_background_tasks_on` + setting in your configuration. If no worker is specified, they'll run on + the main one by default. 
""" # Precision of the scheduler, evaluation of tasks to run will only happen @@ -157,7 +167,7 @@ class TaskScheduler: params: Optional[JsonMapping] = None, ) -> str: """Schedule a new potentially resumable task. A function matching the specified - `action` should have be registered with `register_action` before the task is run. + `action` should've been registered with `register_action` before the task is run. Args: action: the name of a previously registered action @@ -174,9 +184,10 @@ class TaskScheduler: The id of the scheduled task """ status = TaskStatus.SCHEDULED + start_now = False if timestamp is None or timestamp < self._clock.time_msec(): timestamp = self._clock.time_msec() - status = TaskStatus.ACTIVE + start_now = True task = ScheduledTask( random_string(16), @@ -190,9 +201,11 @@ class TaskScheduler: ) await self._store.insert_scheduled_task(task) - if status == TaskStatus.ACTIVE: + # If the task is ready to run immediately, run the scheduling algorithm now + # rather than waiting + if start_now: if self._run_background_tasks: - await self._launch_task(task) + self._launch_scheduled_tasks() else: self._hs.get_replication_command_handler().send_new_active_task(task.id) @@ -207,15 +220,15 @@ class TaskScheduler: result: Optional[JsonMapping] = None, error: Optional[str] = None, ) -> bool: - """Update some task associated values. This is exposed publicly so it can - be used inside task functions, mainly to update the result and be able to - resume a task at a specific step after a restart of synapse. + """Update some task-associated values. This is exposed publicly so it can + be used inside task functions, mainly to update the result or resume + a task at a specific step after a restart of synapse. It can also be used to stage a task, by setting the `status` to `SCHEDULED` with a new timestamp. - The `status` can only be set to `ACTIVE` or `SCHEDULED`, `COMPLETE` and `FAILED` - are terminal status and can only be set by returning it in the function. + The `status` can only be set to `ACTIVE` or `SCHEDULED`. `COMPLETE` and `FAILED` + are terminal statuses and can only be set by returning them from the function. Args: id: the id of the task to update @@ -223,6 +236,12 @@ class TaskScheduler: status: the new `TaskStatus` of the task result: the new result of the task error: the new error of the task + + Returns: + True if the update was successful, False otherwise. + + Raises: + Exception: If a status other than `ACTIVE` or `SCHEDULED` was passed. """ if status == TaskStatus.COMPLETE or status == TaskStatus.FAILED: raise Exception( @@ -260,9 +279,9 @@ class TaskScheduler: max_timestamp: Optional[int] = None, limit: Optional[int] = None, ) -> List[ScheduledTask]: - """Get a list of tasks. Returns all the tasks if no args is provided. + """Get a list of tasks. Returns all the tasks if no args are provided. - If an arg is `None` all tasks matching the other args will be selected. + If an arg is `None`, all tasks matching the other args will be selected. If an arg is an empty list, the corresponding value of the task needs to be `None` to be selected. @@ -274,8 +293,8 @@ class TaskScheduler: a timestamp inferior to the specified one limit: Only return `limit` number of rows if set. - Returns - A list of `ScheduledTask`, ordered by increasing timestamps + Returns: + A list of `ScheduledTask`, ordered by increasing timestamps. 
""" return await self._store.get_scheduled_tasks( actions=actions, @@ -300,23 +319,13 @@ class TaskScheduler: raise Exception(f"Task {id} is currently ACTIVE and can't be deleted") await self._store.delete_scheduled_task(id) - def launch_task_by_id(self, id: str) -> None: - """Try launching the task with the given ID.""" - # Don't bother trying to launch new tasks if we're already at capacity. - if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: - return - - run_as_background_process("launch_task_by_id", self._launch_task_by_id, id) + def on_new_task(self, task_id: str) -> None: + """Handle a notification that a new ready-to-run task has been added to the queue""" + # Just run the scheduler + self._launch_scheduled_tasks() - async def _launch_task_by_id(self, id: str) -> None: - """Helper async function for `launch_task_by_id`.""" - task = await self.get_task(id) - if task: - await self._launch_task(task) - - @wrap_as_background_process("launch_scheduled_tasks") - async def _launch_scheduled_tasks(self) -> None: - """Retrieve and launch scheduled tasks that should be running at that time.""" + def _launch_scheduled_tasks(self) -> None: + """Retrieve and launch scheduled tasks that should be running at this time.""" # Don't bother trying to launch new tasks if we're already at capacity. if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return @@ -326,20 +335,26 @@ class TaskScheduler: self._launching_new_tasks = True - try: - for task in await self.get_tasks( - statuses=[TaskStatus.ACTIVE], limit=self.MAX_CONCURRENT_RUNNING_TASKS - ): - await self._launch_task(task) - for task in await self.get_tasks( - statuses=[TaskStatus.SCHEDULED], - max_timestamp=self._clock.time_msec(), - limit=self.MAX_CONCURRENT_RUNNING_TASKS, - ): - await self._launch_task(task) - - finally: - self._launching_new_tasks = False + async def inner() -> None: + try: + for task in await self.get_tasks( + statuses=[TaskStatus.ACTIVE], + limit=self.MAX_CONCURRENT_RUNNING_TASKS, + ): + # _launch_task will ignore tasks that we're already running, and + # will also do nothing if we're already at the maximum capacity. + await self._launch_task(task) + for task in await self.get_tasks( + statuses=[TaskStatus.SCHEDULED], + max_timestamp=self._clock.time_msec(), + limit=self.MAX_CONCURRENT_RUNNING_TASKS, + ): + await self._launch_task(task) + + finally: + self._launching_new_tasks = False + + run_as_background_process("launch_scheduled_tasks", inner) @wrap_as_background_process("clean_scheduled_tasks") async def _clean_scheduled_tasks(self) -> None: diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py deleted file mode 100644
index 5c9193e8a9..0000000000 --- a/synapse/util/threepids.py +++ /dev/null
@@ -1,123 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -import logging -import re -import typing - -if typing.TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -# it's unclear what the maximum length of an email address is. RFC3696 (as corrected -# by errata) says: -# the upper limit on address lengths should normally be considered to be 254. -# -# In practice, mail servers appear to be more tolerant and allow 400 characters -# or so. Let's allow 500, which should be plenty for everyone. -# -MAX_EMAIL_ADDRESS_LENGTH = 500 - - -async def check_3pid_allowed( - hs: "HomeServer", - medium: str, - address: str, - registration: bool = False, -) -> bool: - """Checks whether a given format of 3PID is allowed to be used on this HS - - Args: - hs: server - medium: 3pid medium - e.g. email, msisdn - address: address within that medium (e.g. "wotan@matrix.org") - msisdns need to first have been canonicalised - registration: whether we want to bind the 3PID as part of registering a new user. - - Returns: - whether the 3PID medium/address is allowed to be added to this HS - """ - if not await hs.get_password_auth_provider().is_3pid_allowed( - medium, address, registration - ): - return False - - if hs.config.registration.allowed_local_3pids: - for constraint in hs.config.registration.allowed_local_3pids: - logger.debug( - "Checking 3PID %s (%s) against %s (%s)", - address, - medium, - constraint["pattern"], - constraint["medium"], - ) - if medium == constraint["medium"] and re.match( - constraint["pattern"], address - ): - return True - else: - return True - - return False - - -def canonicalise_email(address: str) -> str: - """'Canonicalise' email address - Case folding of local part of email address and lowercase domain part - See MSC2265, https://github.com/matrix-org/matrix-doc/pull/2265 - - Args: - address: email address to be canonicalised - Returns: - The canonical form of the email address - Raises: - ValueError if the address could not be parsed. - """ - - address = address.strip() - - parts = address.split("@") - if len(parts) != 2: - logger.debug("Couldn't parse email address %s", address) - raise ValueError("Unable to parse email address") - - return parts[0].casefold() + "@" + parts[1].lower() - - -def validate_email(address: str) -> str: - """Does some basic validation on an email address. - - Returns the canonicalised email, as returned by `canonicalise_email`. - - Raises a ValueError if the email is invalid. - """ - # First we try canonicalising in case that fails - address = canonicalise_email(address) - - # Email addresses have to be at least 3 characters. 
- if len(address) < 3: - raise ValueError("Unable to parse email address") - - if len(address) > MAX_EMAIL_ADDRESS_LENGTH: - raise ValueError("Unable to parse email address") - - return address diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py
index 44b109bdfd..95eb1d7185 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py
@@ -47,7 +47,6 @@ class WheelTimer(Generic[T]): """ self.bucket_size: int = bucket_size self.entries: List[_Entry[T]] = [] - self.current_tick: int = 0 def insert(self, now: int, obj: T, then: int) -> None: """Inserts object into timer. @@ -78,11 +77,10 @@ class WheelTimer(Generic[T]): self.entries[max(min_key, then_key) - min_key].elements.add(obj) return - next_key = now_key + 1 if self.entries: - last_key = self.entries[-1].end_key + last_key = self.entries[-1].end_key + 1 else: - last_key = next_key + last_key = now_key + 1 # Handle the case when `then` is in the past and `entries` is empty. then_key = max(last_key, then_key) diff --git a/synapse/visibility.py b/synapse/visibility.py
index 128413c8aa..dc7b6e4065 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py
@@ -27,7 +27,6 @@ from typing import ( Final, FrozenSet, List, - Mapping, Optional, Sequence, Set, @@ -48,6 +47,7 @@ from synapse.events.utils import clone_event, prune_event from synapse.logging.opentracing import trace from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore +from synapse.synapse_rust.events import event_visible_to_server from synapse.types import RetentionPolicy, StateMap, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util import Clock @@ -135,9 +135,9 @@ async def filter_events_for_client( retention_policies: Dict[str, RetentionPolicy] = {} for room_id in room_ids: - retention_policies[room_id] = ( - await storage.main.get_retention_policy_for_room(room_id) - ) + retention_policies[ + room_id + ] = await storage.main.get_retention_policy_for_room(room_id) def allowed(event: EventBase) -> Optional[EventBase]: state_after_event = event_id_to_state.get(event.event_id) @@ -628,17 +628,6 @@ async def filter_events_for_server( """Filter a list of events based on whether the target server is allowed to see them. - For a fully stated room, the target server is allowed to see an event E if: - - the state at E has world readable or shared history vis, OR - - the state at E says that the target server is in the room. - - For a partially stated room, the target server is allowed to see E if: - - E was created by this homeserver, AND: - - the partial state at E has world readable or shared history vis, OR - - the partial state at E says that the target server is in the room. - - TODO: state before or state after? - Args: storage target_server_name @@ -655,35 +644,6 @@ async def filter_events_for_server( The filtered events. """ - def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool: - if erased_senders and erased_senders[event.sender]: - logger.info("Sender of %s has been erased, redacting", event.event_id) - return True - return False - - def check_event_is_visible( - visibility: str, memberships: StateMap[EventBase] - ) -> bool: - if visibility not in (HistoryVisibility.INVITED, HistoryVisibility.JOINED): - return True - - # We now loop through all membership events looking for - # membership states for the requesting server to determine - # if the server is either in the room or has been invited - # into the room. 
- for ev in memberships.values(): - assert get_domain_from_id(ev.state_key) == target_server_name - - memtype = ev.membership - if memtype == Membership.JOIN: - return True - elif memtype == Membership.INVITE: - if visibility == HistoryVisibility.INVITED: - return True - - # server has no users in the room: redact - return False - if filter_out_erased_senders: erased_senders = await storage.main.are_users_erased(e.sender for e in events) else: @@ -726,20 +686,16 @@ async def filter_events_for_server( target_server_name, ) - def include_event_in_output(e: EventBase) -> bool: - erased = is_sender_erased(e, erased_senders) - visible = check_event_is_visible( - event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {}) - ) - - if e.event_id in partial_state_invisible_event_ids: - visible = False - - return visible and not erased - to_return = [] for e in events: - if include_event_in_output(e): + if event_visible_to_server( + sender=e.sender, + target_server_name=target_server_name, + history_visibility=event_to_history_vis[e.event_id], + erased_senders=erased_senders, + partial_state_invisible=e.event_id in partial_state_invisible_event_ids, + memberships=list(event_to_memberships.get(e.event_id, {}).values()), + ): to_return.append(e) elif redact: to_return.append(prune_event(e)) @@ -796,7 +752,7 @@ async def _event_to_history_vis( async def _event_to_memberships( storage: StorageControllers, events: Collection[EventBase], server_name: str -) -> Dict[str, StateMap[EventBase]]: +) -> Dict[str, StateMap[Tuple[str, str]]]: """Get the remote membership list at each of the given events Returns a map from event id to state map, which will contain only membership events @@ -849,7 +805,7 @@ async def _event_to_memberships( return { e_id: { - key: event_map[inner_e_id] + key: (event_map[inner_e_id].state_key, event_map[inner_e_id].membership) for key, inner_e_id in key_to_eid.items() if inner_e_id in event_map } diff --git a/synmark/__init__.py b/synmark/__init__.py
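The removed Python helpers above are superseded by the Rust `event_visible_to_server`; for reference, a pure-Python restatement of the deleted logic (the `memberships` tuples are assumed to be pre-filtered to the target server, as the old assertion enforced):

from typing import Collection, Mapping, Tuple

from synapse.api.constants import HistoryVisibility, Membership

def event_visible_to_server_py(
    sender: str,
    history_visibility: str,
    erased_senders: Mapping[str, bool],
    partial_state_invisible: bool,
    memberships: Collection[Tuple[str, str]],  # (state_key, membership)
) -> bool:
    # Events from erased senders are always redacted.
    if erased_senders.get(sender):
        return False
    # Events invisible under partial state are never shown.
    if partial_state_invisible:
        return False
    # world_readable/shared history is visible to any server.
    if history_visibility not in (HistoryVisibility.INVITED, HistoryVisibility.JOINED):
        return True
    # Otherwise the target server needs a joined user (or an invited user,
    # when visibility is "invited") in the room.
    for _state_key, membership in memberships:
        if membership == Membership.JOIN:
            return True
        if membership == Membership.INVITE and history_visibility == HistoryVisibility.INVITED:
            return True
    return False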
index 8c47e50c7c..887fec2f96 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py
@@ -27,7 +27,9 @@ from synapse.types import ISynapseReactor try: from twisted.internet.epollreactor import EPollReactor as Reactor except ImportError: - from twisted.internet.pollreactor import PollReactor as Reactor # type: ignore[assignment] + from twisted.internet.pollreactor import ( # type: ignore[assignment] + PollReactor as Reactor, + ) from twisted.internet.main import installReactor diff --git a/synmark/__main__.py b/synmark/__main__.py
index cac57cf111..4944c2f3b0 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py
@@ -40,7 +40,7 @@ T = TypeVar("T") def make_test( - main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]] + main: Callable[[ISynapseReactor, int], Coroutine[Any, Any, float]], ) -> Callable[[int], float]: """ Take a benchmark function and wrap it in a reactor start and stop. @@ -90,6 +90,10 @@ if __name__ == "__main__": if runner.args.worker: if runner.args.log: + # sys.__stdout__ can technically be None, just exit if it's the case + if not sys.__stdout__: + exit(1) + globalLogBeginner.beginLoggingTo( [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False ) diff --git a/test.sh b/test.sh new file mode 100755
index 0000000000..e30ff6b766 --- /dev/null +++ b/test.sh
@@ -0,0 +1,2 @@ +#! /usr/bin/env sh +poetry run trial -j`nproc` tests/ diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index bd229cf7e9..751bf599ae 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py
@@ -484,23 +484,6 @@ class AuthTestCase(unittest.HomeserverTestCase): ResourceLimitError, ) - def test_reserved_threepid(self) -> None: - self.auth_blocking._limit_usage_by_mau = True - self.auth_blocking._max_mau_value = 1 - self.store.get_monthly_active_count = AsyncMock(return_value=2) - threepid = {"medium": "email", "address": "reserved@server.com"} - unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} - self.auth_blocking._mau_limits_reserved_threepids = [threepid] - - self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) - - self.get_failure( - self.auth_blocking.check_auth_blocking(threepid=unknown_threepid), - ResourceLimitError, - ) - - self.get_success(self.auth_blocking.check_auth_blocking(threepid=threepid)) - def test_hs_disabled(self) -> None: self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index a59e168db1..93f4f98916 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py
@@ -1,6 +1,10 @@ +from typing import Optional + from synapse.api.ratelimiting import LimitExceededError, Ratelimiter from synapse.appservice import ApplicationService from synapse.config.ratelimiting import RatelimitSettings +from synapse.module_api import RatelimitOverride +from synapse.module_api.callbacks.ratelimit_callbacks import RatelimitModuleApiCallbacks from synapse.types import create_requester from tests import unittest @@ -220,9 +224,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): self.assertIn("test_id_1", limiter.actions) - self.get_success_or_raise( - limiter.can_do_action(None, key="test_id_2", _time_now_s=10) - ) + self.reactor.advance(60) self.assertNotIn("test_id_1", limiter.actions) @@ -442,3 +444,49 @@ class TestRatelimiter(unittest.HomeserverTestCase): limiter.can_do_action(requester=None, key="a", _time_now_s=20.0) ) self.assertTrue(success) + + def test_get_ratelimit_override_for_user_callback(self) -> None: + test_user_id = "@user:test" + test_limiter_name = "name" + callbacks = RatelimitModuleApiCallbacks(self.hs) + requester = create_requester(test_user_id) + limiter = Ratelimiter( + store=self.hs.get_datastores().main, + clock=self.clock, + cfg=RatelimitSettings( + test_limiter_name, + per_second=0.1, + burst_count=3, + ), + ratelimit_callbacks=callbacks, + ) + + # Observe four actions, exceeding the burst_count. + limiter.record_action(requester=requester, n_actions=4, _time_now_s=0.0) + + # We should be prevented from taking a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=requester, _time_now_s=0.0) + ) + self.assertFalse(success) + + # Now register a callback that overrides the ratelimit for this user + # and limiter name. + async def get_ratelimit_override_for_user( + user_id: str, limiter_name: str + ) -> Optional[RatelimitOverride]: + if user_id == test_user_id: + return RatelimitOverride( + per_second=0.1, + burst_count=10, + ) + return None + + callbacks.register_callbacks( + get_ratelimit_override_for_user=get_ratelimit_override_for_user + ) + + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=requester, _time_now_s=0.0) + ) + self.assertTrue(success) diff --git a/tests/api/test_urls.py b/tests/api/test_urls.py new file mode 100644
index 0000000000..ce156a05dc --- /dev/null +++ b/tests/api/test_urls.py
@@ -0,0 +1,55 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.urls import LoginSSORedirectURIBuilder +from synapse.server import HomeServer +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + +# a (valid) url with some annoying characters in. %3D is =, %26 is &, %2B is + +TRICKY_TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="fö%26=o"' + + +class LoginSSORedirectURIBuilderTestCase(HomeserverTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config) + + def test_no_idp_id(self) -> None: + self.assertEqual( + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id=None, client_redirect_url="http://example.com/redirect" + ), + "https://test/_matrix/client/v3/login/sso/redirect?redirectUrl=http%3A%2F%2Fexample.com%2Fredirect", + ) + + def test_explicit_idp_id(self) -> None: + self.assertEqual( + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="oidc-github", client_redirect_url="http://example.com/redirect" + ), + "https://test/_matrix/client/v3/login/sso/redirect/oidc-github?redirectUrl=http%3A%2F%2Fexample.com%2Fredirect", + ) + + def test_tricky_redirect_uri(self) -> None: + self.assertEqual( + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="oidc-github", + client_redirect_url=TRICKY_TEST_CLIENT_REDIRECT_URL, + ), + "https://test/_matrix/client/v3/login/sso/redirect/oidc-github?redirectUrl=https%3A%2F%2Fx%3F%3Cab+c%3E%26q%22%2B%253D%252B%22%3D%22f%C3%B6%2526%3Do%22", + ) diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index a1c7ccdd0b..a5bf7e0635 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py
@@ -2,7 +2,7 @@ # This file is licensed under the Affero General Public License (AGPL) version 3. # # Copyright 2015, 2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023, 2025 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -150,7 +150,8 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): self.assertEqual(1, len(self.txnctrl.recoverers)) # and stored self.assertEqual(0, txn.complete.call_count) # txn not completed self.store.set_appservice_state.assert_called_once_with( - service, ApplicationServiceState.DOWN # service marked as down + service, + ApplicationServiceState.DOWN, # service marked as down ) @@ -233,6 +234,41 @@ class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): self.assertEqual(1, txn.complete.call_count) self.callback.assert_called_once_with(self.recoverer) + def test_recover_force_retry(self) -> None: + txn = Mock() + txns = [txn, None] + pop_txn = False + + def take_txn( + *args: object, **kwargs: object + ) -> "defer.Deferred[Optional[Mock]]": + if pop_txn: + return defer.succeed(txns.pop(0)) + else: + return defer.succeed(txn) + + self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn) + + # Start the recovery, and then fail the first attempt. + self.recoverer.recover() + self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) + txn.send = AsyncMock(return_value=False) + txn.complete = AsyncMock(return_value=None) + self.clock.advance_time(2) + self.assertEqual(1, txn.send.call_count) + self.assertEqual(0, txn.complete.call_count) + self.assertEqual(0, self.callback.call_count) + + # Now allow the send to succeed, and force a retry. + pop_txn = True # returns the txn the first time, then no more. + txn.send = AsyncMock(return_value=True) # successfully send the txn + self.recoverer.force_retry() + self.assertEqual(1, txn.send.call_count) # the new mock starts from a zero call count + self.assertEqual(1, txn.complete.call_count) + + # Ensure we call the callback to say we're done! + self.callback.assert_called_once_with(self.recoverer) + # Corresponds to synapse.appservice.scheduler._TransactionController.send TxnCtrlArgs: TypeAlias = """ diff --git a/tests/config/test_api.py b/tests/config/test_api.py
index 6773c9a277..e6cc3e21ed 100644 --- a/tests/config/test_api.py +++ b/tests/config/test_api.py
@@ -3,6 +3,7 @@ from unittest import TestCase as StdlibTestCase import yaml from synapse.config import ConfigError +from synapse.config._base import RootConfig from synapse.config.api import ApiConfig from synapse.types.state import StateFilter @@ -19,7 +20,7 @@ DEFAULT_PREJOIN_STATE_PAIRS = { class TestRoomPrejoinState(StdlibTestCase): def read_config(self, source: str) -> ApiConfig: - config = ApiConfig() + config = ApiConfig(RootConfig()) config.read_config(yaml.safe_load(source)) return config diff --git a/tests/config/test_appservice.py b/tests/config/test_appservice.py
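The same mechanical change repeats through the config tests that follow: section configs are now built against an explicit root config. A minimal sketch of the updated pattern (constructor signature inferred from these diffs):

from synapse.config._base import RootConfig
from synapse.config.api import ApiConfig

# Section configs now take their RootConfig instead of standing alone.
config = ApiConfig(RootConfig())
config.read_config({})  # the tests feed parsed YAML here instead of an empty dict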
index e3021b59d8..2572681224 100644 --- a/tests/config/test_appservice.py +++ b/tests/config/test_appservice.py
@@ -19,6 +19,7 @@ # # +from synapse.config._base import RootConfig from synapse.config.appservice import AppServiceConfig, ConfigError from tests.unittest import TestCase @@ -36,12 +37,12 @@ class AppServiceConfigTest(TestCase): ["foo", "bar", False], ]: with self.assertRaises(ConfigError): - AppServiceConfig().read_config( + AppServiceConfig(RootConfig()).read_config( {"app_service_config_files": invalid_value} ) def test_valid_app_service_config_files(self) -> None: - AppServiceConfig().read_config({"app_service_config_files": []}) - AppServiceConfig().read_config( + AppServiceConfig(RootConfig()).read_config({"app_service_config_files": []}) + AppServiceConfig(RootConfig()).read_config( {"app_service_config_files": ["/not/a/real/path", "/not/a/real/path/2"]} ) diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py
index 631263b5ca..aead73e059 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py
@@ -19,6 +19,7 @@ # # +from synapse.config._base import RootConfig from synapse.config.cache import CacheConfig, add_resizable_cache from synapse.types import JsonDict from synapse.util.caches.lrucache import LruCache @@ -29,7 +30,7 @@ from tests.unittest import TestCase class CacheConfigTests(TestCase): def setUp(self) -> None: # Reset caches before each test since there's global state involved. - self.config = CacheConfig() + self.config = CacheConfig(RootConfig()) self.config.reset() def tearDown(self) -> None: diff --git a/tests/config/test_database.py b/tests/config/test_database.py
index b46519f84a..3fa5fff2b2 100644 --- a/tests/config/test_database.py +++ b/tests/config/test_database.py
@@ -20,6 +20,7 @@ import yaml +from synapse.config._base import RootConfig from synapse.config.database import DatabaseConfig from tests import unittest @@ -28,7 +29,9 @@ from tests import unittest class DatabaseConfigTestCase(unittest.TestCase): def test_database_configured_correctly(self) -> None: conf = yaml.safe_load( - DatabaseConfig().generate_config_section(data_dir_path="/data_dir_path") + DatabaseConfig(RootConfig()).generate_config_section( + data_dir_path="/data_dir_path" + ) ) expected_database_conf = { diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index 479d2aab91..e06a961b16 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py
@@ -19,17 +19,33 @@ # [This file includes modifications made by New Vector Limited] # # +import tempfile +from typing import Callable +from unittest import mock + import yaml +from parameterized import parameterized from synapse.config import ConfigError +from synapse.config._base import RootConfig from synapse.config.homeserver import HomeServerConfig from tests.config.utils import ConfigFileTestCase +try: + import authlib +except ImportError: + authlib = None + +try: + import hiredis +except ImportError: + hiredis = None # type: ignore + class ConfigLoadingFileTestCase(ConfigFileTestCase): def test_load_fails_if_server_name_missing(self) -> None: - self.generate_config_and_remove_lines_containing("server_name") + self.generate_config_and_remove_lines_containing(["server_name"]) with self.assertRaises(ConfigError): HomeServerConfig.load_config("", ["-c", self.config_file]) with self.assertRaises(ConfigError): @@ -66,7 +82,7 @@ class ConfigLoadingFileTestCase(ConfigFileTestCase): ) def test_load_succeeds_if_macaroon_secret_key_missing(self) -> None: - self.generate_config_and_remove_lines_containing("macaroon") + self.generate_config_and_remove_lines_containing(["macaroon"]) config1 = HomeServerConfig.load_config("", ["-c", self.config_file]) config2 = HomeServerConfig.load_config("", ["-c", self.config_file]) config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.config_file]) @@ -101,18 +117,180 @@ class ConfigLoadingFileTestCase(ConfigFileTestCase): self.assertTrue(config3.registration.enable_registration) def test_stats_enabled(self) -> None: - self.generate_config_and_remove_lines_containing("enable_metrics") + self.generate_config_and_remove_lines_containing(["enable_metrics"]) self.add_lines_to_config(["enable_metrics: true"]) # The default Metrics Flags are off by default. 
config = HomeServerConfig.load_config("", ["-c", self.config_file]) self.assertFalse(config.metrics.metrics_flags.known_servers) - def test_depreciated_identity_server_flag_throws_error(self) -> None: + @parameterized.expand( + [ + "turn_shared_secret_path: /does/not/exist", + "registration_shared_secret_path: /does/not/exist", + "macaroon_secret_key_path: /does/not/exist", + "form_secret_path: /does/not/exist", + "worker_replication_secret_path: /does/not/exist", + "experimental_features:\n msc3861:\n client_secret_path: /does/not/exist", + "experimental_features:\n msc3861:\n admin_token_path: /does/not/exist", + *["redis:\n enabled: true\n password_path: /does/not/exist"] + * (hiredis is not None), + ] + ) + def test_secret_files_missing(self, config_str: str) -> None: self.generate_config() - # Needed to ensure that actual key/value pair added below don't end up on a line with a comment - self.add_lines_to_config([" "]) - # Check that presence of "trust_identity_server_for_password" throws config error - self.add_lines_to_config(["trust_identity_server_for_password_resets: true"]) + self.add_lines_to_config(["", config_str]) + with self.assertRaises(ConfigError): HomeServerConfig.load_config("", ["-c", self.config_file]) + + @parameterized.expand( + [ + ( + "turn_shared_secret_path: {}", + lambda c: c.voip.turn_shared_secret.encode("utf-8"), + ), + ( + "registration_shared_secret_path: {}", + lambda c: c.registration.registration_shared_secret.encode("utf-8"), + ), + ( + "macaroon_secret_key_path: {}", + lambda c: c.key.macaroon_secret_key, + ), + ( + "form_secret_path: {}", + lambda c: c.key.form_secret.encode("utf-8"), + ), + ( + "worker_replication_secret_path: {}", + lambda c: c.worker.worker_replication_secret.encode("utf-8"), + ), + ( + "experimental_features:\n msc3861:\n client_secret_path: {}", + lambda c: c.experimental.msc3861.client_secret().encode("utf-8"), + ), + ( + "experimental_features:\n msc3861:\n admin_token_path: {}", + lambda c: c.experimental.msc3861.admin_token().encode("utf-8"), + ), + *[ + ( + "redis:\n enabled: true\n password_path: {}", + lambda c: c.redis.redis_password.encode("utf-8"), + ) + ] + * (hiredis is not None), + ] + ) + def test_secret_files_existing( + self, config_line: str, get_secret: Callable[[RootConfig], str] + ) -> None: + self.generate_config_and_remove_lines_containing( + ["form_secret", "macaroon_secret_key", "registration_shared_secret"] + ) + with tempfile.NamedTemporaryFile(buffering=0) as secret_file: + secret_file.write(b"53C237") + + self.add_lines_to_config(["", config_line.format(secret_file.name)]) + config = HomeServerConfig.load_config("", ["-c", self.config_file]) + + self.assertEqual(get_secret(config), b"53C237") + + @parameterized.expand( + [ + "turn_shared_secret: 53C237", + "registration_shared_secret: 53C237", + "macaroon_secret_key: 53C237", + "recaptcha_private_key: 53C237", + "recaptcha_public_key: ¬53C237", + "form_secret: 53C237", + "worker_replication_secret: 53C237", + *[ + "experimental_features:\n" + " msc3861:\n" + " enabled: true\n" + " client_secret: 53C237" + ] + * (authlib is not None), + *[ + "experimental_features:\n" + " msc3861:\n" + " enabled: true\n" + " client_auth_method: private_key_jwt\n" + ' jwk: {{"mock": "mock"}}' + ] + * (authlib is not None), + *[ + "experimental_features:\n" + " msc3861:\n" + " enabled: true\n" + " admin_token: 53C237\n" + " client_secret_path: {secret_file}" + ] + * (authlib is not None), + *["redis:\n enabled: true\n password: 53C237"] * (hiredis is not None), + ] + ) 
+ def test_no_secrets_in_config(self, config_line: str) -> None: + if authlib is not None: + patcher = mock.patch("authlib.jose.rfc7517.JsonWebKey.import_key") + self.addCleanup(patcher.stop) + patcher.start() + + with tempfile.NamedTemporaryFile(buffering=0) as secret_file: + # Only used for less mocking with admin_token + secret_file.write(b"53C237") + + self.generate_config_and_remove_lines_containing( + ["form_secret", "macaroon_secret_key", "registration_shared_secret"] + ) + # Check strict mode with no offenders. + HomeServerConfig.load_config( + "", ["-c", self.config_file, "--no-secrets-in-config"] + ) + self.add_lines_to_config( + ["", config_line.format(secret_file=secret_file.name)] + ) + # Check strict mode with a single offender. + with self.assertRaises(ConfigError): + HomeServerConfig.load_config( + "", ["-c", self.config_file, "--no-secrets-in-config"] + ) + + # Check lenient mode with a single offender. + HomeServerConfig.load_config("", ["-c", self.config_file]) + + def test_no_secrets_in_config_but_in_files(self) -> None: + with tempfile.NamedTemporaryFile(buffering=0) as secret_file: + secret_file.write(b"53C237") + + self.generate_config_and_remove_lines_containing( + ["form_secret", "macaroon_secret_key", "registration_shared_secret"] + ) + self.add_lines_to_config( + [ + "", + f"turn_shared_secret_path: {secret_file.name}", + f"registration_shared_secret_path: {secret_file.name}", + f"macaroon_secret_key_path: {secret_file.name}", + f"recaptcha_private_key_path: {secret_file.name}", + f"recaptcha_public_key_path: {secret_file.name}", + f"form_secret_path: {secret_file.name}", + f"worker_replication_secret_path: {secret_file.name}", + *[ + "experimental_features:\n" + " msc3861:\n" + " enabled: true\n" + f" admin_token_path: {secret_file.name}\n" + f" client_secret_path: {secret_file.name}\n" + # f" jwk_path: {secret_file.name}" + ] + * (authlib is not None), + *[f"redis:\n enabled: true\n password_path: {secret_file.name}"] + * (hiredis is not None), + ] + ) + HomeServerConfig.load_config( + "", ["-c", self.config_file, "--no-secrets-in-config"] + ) diff --git a/tests/config/test_oauth_delegation.py b/tests/config/test_oauth_delegation.py
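Taken together, these tests pin down a strict mode: with `--no-secrets-in-config`, inline secret values are rejected while the `*_path` file-based variants remain allowed. A sketch of the resulting operator workflow (flag and key names taken verbatim from the tests; the YAML shown is illustrative):

from synapse.config.homeserver import HomeServerConfig

# homeserver.yaml (illustrative):
#   macaroon_secret_key_path: /run/secrets/macaroon   # allowed in strict mode
#   # macaroon_secret_key: 53C237                     # would raise ConfigError
config = HomeServerConfig.load_config(
    "", ["-c", "homeserver.yaml", "--no-secrets-in-config"]
)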
index 713bddeb90..e1e4e008dd 100644 --- a/tests/config/test_oauth_delegation.py +++ b/tests/config/test_oauth_delegation.py
@@ -205,17 +205,6 @@ class MSC3861OAuthDelegation(TestCase): with self.assertRaises(ConfigError): self.parse_config() - def test_cas_sso_cannot_be_enabled(self) -> None: - self.config_dict["cas_config"] = { - "enabled": True, - "server_url": "https://cas-server.com", - "displayname_attribute": "name", - "required_attributes": {"userGroup": "staff", "department": "None"}, - } - - with self.assertRaises(ConfigError): - self.parse_config() - def test_auth_providers_cannot_be_enabled(self) -> None: self.config_dict["modules"] = [ { @@ -270,8 +259,3 @@ class MSC3861OAuthDelegation(TestCase): self.config_dict["session_lifetime"] = "24h" with self.assertRaises(ConfigError): self.parse_config() - - def test_enable_3pid_changes_cannot_be_enabled(self) -> None: - self.config_dict["enable_3pid_changes"] = True - with self.assertRaises(ConfigError): - self.parse_config() diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py
index e25f7787f4..5208381279 100644 --- a/tests/config/test_room_directory.py +++ b/tests/config/test_room_directory.py
@@ -24,6 +24,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin import synapse.rest.client.login import synapse.rest.client.room +from synapse.config._base import RootConfig from synapse.config.room_directory import RoomDirectoryConfig from synapse.server import HomeServer from synapse.util import Clock @@ -63,7 +64,7 @@ class RoomDirectoryConfigTestCase(unittest.HomeserverTestCase): """ ) - rd_config = RoomDirectoryConfig() + rd_config = RoomDirectoryConfig(RootConfig()) rd_config.read_config(config) self.assertFalse( @@ -123,7 +124,7 @@ class RoomDirectoryConfigTestCase(unittest.HomeserverTestCase): """ ) - rd_config = RoomDirectoryConfig() + rd_config = RoomDirectoryConfig(RootConfig()) rd_config.read_config(config) self.assertFalse( diff --git a/tests/config/test_server.py b/tests/config/test_server.py
index 74073cfdc5..05faf8fcc9 100644 --- a/tests/config/test_server.py +++ b/tests/config/test_server.py
@@ -20,27 +20,16 @@ import yaml -from synapse.config._base import ConfigError -from synapse.config.server import ServerConfig, generate_ip_set, is_threepid_reserved +from synapse.config._base import ConfigError, RootConfig +from synapse.config.server import ServerConfig, generate_ip_set from tests import unittest class ServerConfigTestCase(unittest.TestCase): - def test_is_threepid_reserved(self) -> None: - user1 = {"medium": "email", "address": "user1@example.com"} - user2 = {"medium": "email", "address": "user2@example.com"} - user3 = {"medium": "email", "address": "user3@example.com"} - user1_msisdn = {"medium": "msisdn", "address": "447700000000"} - config = [user1, user2] - - self.assertTrue(is_threepid_reserved(config, user1)) - self.assertFalse(is_threepid_reserved(config, user3)) - self.assertFalse(is_threepid_reserved(config, user1_msisdn)) - def test_unsecure_listener_no_listeners_open_private_ports_false(self) -> None: conf = yaml.safe_load( - ServerConfig().generate_config_section( + ServerConfig(RootConfig()).generate_config_section( "CONFDIR", "/data_dir_path", "che.org", False, None ) ) @@ -60,7 +49,7 @@ class ServerConfigTestCase(unittest.TestCase): def test_unsecure_listener_no_listeners_open_private_ports_true(self) -> None: conf = yaml.safe_load( - ServerConfig().generate_config_section( + ServerConfig(RootConfig()).generate_config_section( "CONFDIR", "/data_dir_path", "che.org", True, None ) ) @@ -94,7 +83,7 @@ class ServerConfigTestCase(unittest.TestCase): ] conf = yaml.safe_load( - ServerConfig().generate_config_section( + ServerConfig(RootConfig()).generate_config_section( "CONFDIR", "/data_dir_path", "this.one.listens", True, listeners ) ) @@ -128,7 +117,7 @@ class ServerConfigTestCase(unittest.TestCase): expected_listeners[1]["bind_addresses"] = ["::1", "127.0.0.1"] conf = yaml.safe_load( - ServerConfig().generate_config_section( + ServerConfig(RootConfig()).generate_config_section( "CONFDIR", "/data_dir_path", "this.one.listens", True, listeners ) ) diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py
index 64c0285d01..3a21975b89 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py
@@ -47,7 +47,7 @@ class WorkerDutyConfigTestCase(TestCase): "worker_app": worker_app, **extras, } - worker_config.read_config(worker_config_dict) + worker_config.read_config(worker_config_dict, allow_secrets_in_config=True) return worker_config def test_old_configs_master(self) -> None: diff --git a/tests/config/utils.py b/tests/config/utils.py
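The new keyword argument suggests section-level `read_config` now threads through a lenient/strict switch; a sketch of that contract as the tests use it (semantics inferred from test_load.py above; the minimal worker dict is assumed):

from synapse.config._base import RootConfig
from synapse.config.workers import WorkerConfig

worker_config = WorkerConfig(RootConfig())
# True mirrors the lenient default; passing False is what
# --no-secrets-in-config implies, raising ConfigError on inline secrets.
worker_config.read_config(
    {"worker_app": "synapse.app.generic_worker"}, allow_secrets_in_config=True
)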
index 11140ff979..3cba4ac588 100644 --- a/tests/config/utils.py +++ b/tests/config/utils.py
@@ -51,12 +51,13 @@ class ConfigFileTestCase(unittest.TestCase): ], ) - def generate_config_and_remove_lines_containing(self, needle: str) -> None: + def generate_config_and_remove_lines_containing(self, needles: list[str]) -> None: self.generate_config() with open(self.config_file) as f: contents = f.readlines() - contents = [line for line in contents if needle not in line] + for needle in needles: + contents = [line for line in contents if needle not in line] with open(self.config_file, "w") as f: f.write("".join(contents)) diff --git a/tests/events/test_auto_accept_invites.py b/tests/events/test_auto_accept_invites.py
index 7fb4d4fa90..d2100e9903 100644 --- a/tests/events/test_auto_accept_invites.py +++ b/tests/events/test_auto_accept_invites.py
@@ -31,6 +31,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError +from synapse.config._base import RootConfig from synapse.config.auto_accept_invites import AutoAcceptInvitesConfig from synapse.events.auto_accept_invites import InviteAutoAccepter from synapse.federation.federation_base import event_from_pdu_json @@ -39,7 +40,7 @@ from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer -from synapse.types import StreamToken, create_requester +from synapse.types import StreamToken, UserID, UserInfo, create_requester from synapse.util import Clock from tests.handlers.test_sync import generate_sync_config @@ -349,6 +350,169 @@ class AutoAcceptInvitesTestCase(FederatingHomeserverTestCase): join_updates, _ = sync_join(self, invited_user_id) self.assertEqual(len(join_updates), 0) + @override_config( + { + "auto_accept_invites": { + "enabled": True, + }, + } + ) + async def test_ignore_invite_for_missing_user(self) -> None: + """Tests that receiving an invite for a missing user is ignored.""" + inviting_user_id = self.register_user("inviter", "pass") + inviting_user_tok = self.login("inviter", "pass") + + # A local user who receives an invite + invited_user_id = "@fake:" + self.hs.config.server.server_name + + # Create a room and send an invite to the other user + room_id = self.helper.create_room_as( + inviting_user_id, + tok=inviting_user_tok, + ) + + self.helper.invite( + room_id, + inviting_user_id, + invited_user_id, + tok=inviting_user_tok, + ) + + join_updates, _ = sync_join(self, inviting_user_id) + # Assert that the invite was not auto-accepted: the last event in the room is still the invite. + self.assertEqual( + join_updates[0].timeline.events[-1].content["membership"], "invite" + ) + + @override_config( + { + "auto_accept_invites": { + "enabled": True, + }, + } + ) + async def test_ignore_invite_for_deactivated_user(self) -> None: + """Tests that receiving an invite for a deactivated user is ignored.""" + inviting_user_id = self.register_user("inviter", "pass", admin=True) + inviting_user_tok = self.login("inviter", "pass") + + # A local user who receives an invite + invited_user_id = self.register_user("invitee", "pass") + + # Create a room and send an invite to the other user + room_id = self.helper.create_room_as( + inviting_user_id, + tok=inviting_user_tok, + ) + + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % invited_user_id, + {"deactivated": True}, + access_token=inviting_user_tok, + ) + + assert channel.code == 200 + + self.helper.invite( + room_id, + inviting_user_id, + invited_user_id, + tok=inviting_user_tok, + ) + + join_updates, _ = sync_join(self, inviting_user_id) + # Assert that the invite was not auto-accepted: the last event in the room is still the invite.
+ self.assertEqual( + join_updates[0].timeline.events[-1].content["membership"], "invite" + ) + + @override_config( + { + "auto_accept_invites": { + "enabled": True, + }, + } + ) + async def test_ignore_invite_for_suspended_user(self) -> None: + """Tests that receiving an invite for a suspended user is ignored.""" + inviting_user_id = self.register_user("inviter", "pass", admin=True) + inviting_user_tok = self.login("inviter", "pass") + + # A local user who receives an invite + invited_user_id = self.register_user("invitee", "pass") + + # Create a room and send an invite to the other user + room_id = self.helper.create_room_as( + inviting_user_id, + tok=inviting_user_tok, + ) + + channel = self.make_request( + "PUT", + f"/_synapse/admin/v1/suspend/{invited_user_id}", + {"suspend": True}, + access_token=inviting_user_tok, + ) + + assert channel.code == 200 + + self.helper.invite( + room_id, + inviting_user_id, + invited_user_id, + tok=inviting_user_tok, + ) + + join_updates, _ = sync_join(self, inviting_user_id) + # Assert that the invite was not auto-accepted: the last event in the room is still the invite. + self.assertEqual( + join_updates[0].timeline.events[-1].content["membership"], "invite" + ) + + @override_config( + { + "auto_accept_invites": { + "enabled": True, + }, + } + ) + async def test_ignore_invite_for_locked_user(self) -> None: + """Tests that receiving an invite for a locked user is ignored.""" + inviting_user_id = self.register_user("inviter", "pass", admin=True) + inviting_user_tok = self.login("inviter", "pass") + + # A local user who receives an invite + invited_user_id = self.register_user("invitee", "pass") + + # Create a room and send an invite to the other user + room_id = self.helper.create_room_as( + inviting_user_id, + tok=inviting_user_tok, + ) + + channel = self.make_request( + "PUT", + f"/_synapse/admin/v2/users/{invited_user_id}", + {"locked": True}, + access_token=inviting_user_tok, + ) + + assert channel.code == 200 + + self.helper.invite( + room_id, + inviting_user_id, + invited_user_id, + tok=inviting_user_tok, + ) + + join_updates, _ = sync_join(self, inviting_user_id) + # Assert that the invite was not auto-accepted: the last event in the room is still the invite. + self.assertEqual( + join_updates[0].timeline.events[-1].content["membership"], "invite" + ) + _request_key = 0 @@ -527,7 +691,7 @@ class InviteAutoAccepterInternalTestCase(TestCase): "only_from_local_users": True, } } - parsed_config = AutoAcceptInvitesConfig() + parsed_config = AutoAcceptInvitesConfig(RootConfig()) parsed_config.read_config(config) self.assertTrue(parsed_config.enabled) @@ -647,11 +811,27 @@ def create_module( module_api.is_mine.side_effect = lambda a: a.split(":")[1] == "test" module_api.worker_name = worker_name module_api.sleep.return_value = make_multiple_awaitable(None) + module_api.get_userinfo_by_id.return_value = UserInfo( + user_id=UserID.from_string("@user:test"), + is_admin=False, + is_guest=False, + consent_server_notice_sent=None, + consent_ts=None, + consent_version=None, + appservice_id=None, + creation_ts=0, + user_type=None, + is_deactivated=False, + locked=False, + is_shadow_banned=False, + approved=True, + suspended=False, + ) if config_override is None: config_override = {} - config = AutoAcceptInvitesConfig() + config = AutoAcceptInvitesConfig(RootConfig()) config.read_config(config_override) return InviteAutoAccepter(config, module_api) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
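For reference, the configuration surface these tests toggle, written out as the equivalent settings (`enabled` and `only_from_local_users` both appear verbatim in the tests above):

auto_accept_invites_config = {
    "auto_accept_invites": {
        "enabled": True,
        # Only auto-accept invites issued by users on this homeserver, as in
        # the InviteAutoAccepterInternalTestCase config above.
        "only_from_local_users": True,
    }
}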
index 30f8787758..654e6521a2 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py
@@ -756,7 +756,8 @@ class SerializeEventTestCase(stdlib_unittest.TestCase): def test_event_fields_fail_if_fields_not_str(self) -> None: with self.assertRaises(TypeError): self.serialize( - MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4] # type: ignore[list-item] + MockEvent(room_id="!foo:bar", content={"foo": "bar"}), + ["room_id", 4], # type: ignore[list-item] ) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 9bd97e5d4e..87b9ffc0c6 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py
@@ -158,7 +158,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): async def get_current_state_event_counts(room_id: str) -> int: return 600 - self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts # type: ignore[method-assign] + self.hs.get_datastores().main.get_current_state_event_counts = ( # type: ignore[method-assign] + get_current_state_event_counts + ) d = handler._remote_join( create_requester(u1), diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index 08214b0013..1e1ed8e642 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py
@@ -401,7 +401,10 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): now = self.clock.time_msec() self.get_success( self.hs.get_datastores().main.set_destination_retry_timings( - "zzzerver", now, now, 24 * 60 * 60 * 1000 # retry in 1 day + "zzzerver", + now, + now, + 24 * 60 * 60 * 1000, # retry in 1 day ) ) diff --git a/tests/federation/test_federation_devices.py b/tests/federation/test_federation_devices.py new file mode 100644
index 0000000000..ba27e69479 --- /dev/null +++ b/tests/federation/test_federation_devices.py
@@ -0,0 +1,161 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# Originally licensed under the Apache License, Version 2.0: +# <http://www.apache.org/licenses/LICENSE-2.0>. +# +# [This file includes modifications made by New Vector Limited] +# +# + +import logging +from unittest.mock import AsyncMock, Mock + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.handlers.device import DeviceListUpdater +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock +from synapse.util.retryutils import NotRetryingDestination + +from tests import unittest + +logger = logging.getLogger(__name__) + + +class DeviceListResyncTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = self.hs.get_datastores().main + + def test_retry_device_list_resync(self) -> None: + """Tests that device lists are marked as stale if they couldn't be synced, and + that stale device lists are retried periodically. + """ + remote_user_id = "@john:test_remote" + remote_origin = "test_remote" + + # Track the number of attempts to resync the user's device list. + self.resync_attempts = 0 + + # When this function is called, increment the number of resync attempts (only if + # we're querying devices for the right user ID), then raise a + # NotRetryingDestination error to fail the resync gracefully. + def query_user_devices( + destination: str, user_id: str, timeout: int = 30000 + ) -> JsonDict: + if user_id == remote_user_id: + self.resync_attempts += 1 + + raise NotRetryingDestination(0, 0, destination) + + # Register the mock on the federation client. + federation_client = self.hs.get_federation_client() + federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[method-assign] + + # Register a mock on the store so that the incoming update doesn't fail because + # we don't share a room with the user. + self.store.get_rooms_for_user = AsyncMock(return_value=["!someroom:test"]) + + # Manually inject a fake device list update. We need this update to include at + # least one prev_id so that the user's device list will need to be retried. + device_list_updater = self.hs.get_device_handler().device_list_updater + assert isinstance(device_list_updater, DeviceListUpdater) + self.get_success( + device_list_updater.incoming_device_list_update( + origin=remote_origin, + edu_content={ + "deleted": False, + "device_display_name": "Mobile", + "device_id": "QBUAZIFURK", + "prev_id": [5], + "stream_id": 6, + "user_id": remote_user_id, + }, + ) + ) + + # Check that there was one resync attempt. + self.assertEqual(self.resync_attempts, 1) + + # Check that the resync attempt failed and caused the user's device list to be + # marked as stale. + need_resync = self.get_success( + self.store.get_user_ids_requiring_device_list_resync() + ) + self.assertIn(remote_user_id, need_resync) + + # Check that waiting for 30 seconds caused Synapse to retry resyncing the device + # list. 
+ self.reactor.advance(30) + self.assertEqual(self.resync_attempts, 2) + + def test_cross_signing_keys_retry(self) -> None: + """Tests that resyncing a device list correctly processes cross-signing keys from + the remote server. + """ + remote_user_id = "@john:test_remote" + remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" + remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" + + # Register mock device list retrieval on the federation client. + federation_client = self.hs.get_federation_client() + federation_client.query_user_devices = AsyncMock( # type: ignore[method-assign] + return_value={ + "user_id": remote_user_id, + "stream_id": 1, + "devices": [], + "master_key": { + "user_id": remote_user_id, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + }, + "self_signing_key": { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + remote_self_signing_key: remote_self_signing_key + }, + }, + } + ) + + # Resync the device list. + device_handler = self.hs.get_device_handler() + self.get_success( + device_handler.device_list_updater.multi_user_device_resync( + [remote_user_id] + ), + ) + + # Retrieve the cross-signing keys for this user. + keys = self.get_success( + self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), + ) + self.assertIn(remote_user_id, keys) + key = keys[remote_user_id] + assert key is not None + + # Check that the master key is the one returned by the mock. + master_key = key["master"] + self.assertEqual(len(master_key["keys"]), 1) + self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) + self.assertTrue(remote_master_key in master_key["keys"].values()) + + # Check that the self-signing key is the one returned by the mock. + self_signing_key = key["self_signing"] + self.assertEqual(len(self_signing_key["keys"]), 1) + self.assertTrue( + "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), + ) + self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values()) diff --git a/tests/federation/test_federation_media.py b/tests/federation/test_federation_media.py
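The injected update above is an ordinary m.device_list_update EDU; its shape, pulled out of the test for reference (field names verbatim, values illustrative):

edu_content = {
    "user_id": "@john:test_remote",
    "device_id": "QBUAZIFURK",
    "device_display_name": "Mobile",
    "deleted": False,
    "prev_id": [5],  # an unseen prev_id is what forces the full resync path
    "stream_id": 6,
}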
index 0dcf20f5f5..9c92003ce5 100644 --- a/tests/federation/test_federation_media.py +++ b/tests/federation/test_federation_media.py
@@ -40,7 +40,6 @@ from tests.test_utils import SMALL_PNG class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") @@ -148,9 +147,47 @@ class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase): found_file = any(SMALL_PNG in field for field in stripped_bytes) self.assertTrue(found_file) + def test_federation_etag(self) -> None: + """Test that federation ETags work""" + + content = io.BytesIO(b"file_to_stream") + content_uri = self.get_success( + self.media_repo.create_content( + "text/plain", + "test_upload", + content, + 46, + UserID.from_string("@user_id:whatever.org"), + ) + ) + + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/media/download/{content_uri.media_id}", + ) + self.pump() + self.assertEqual(200, channel.code) + + # We expect exactly one ETag header. + etags = channel.headers.getRawHeaders("ETag") + self.assertIsNotNone(etags) + assert etags is not None # For mypy + self.assertEqual(len(etags), 1) + etag = etags[0] + + # Refetching with the etag should result in 304 and empty body. + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/media/download/{content_uri.media_id}", + custom_headers=[("If-None-Match", etag)], + ) + self.pump() + self.assertEqual(channel.code, 304) + self.assertEqual(channel.is_finished(), True) + self.assertNotIn("body", channel.result) -class FederationThumbnailTest(unittest.FederatingHomeserverTestCase): +class FederationThumbnailTest(unittest.FederatingHomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") diff --git a/tests/federation/test_federation_out_of_band_membership.py b/tests/federation/test_federation_out_of_band_membership.py new file mode 100644
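Client-side, the caching contract asserted by `test_federation_etag` looks roughly like this (a sketch using `requests`; `signed_headers` is a placeholder for real X-Matrix federation auth, which is elided):

import requests

url = "https://hs.example/_matrix/federation/v1/media/download/abc123"
signed_headers = {"Authorization": "X-Matrix ..."}  # placeholder, not real auth

first = requests.get(url, headers=signed_headers)
first.raise_for_status()  # 200, with an ETag response header
etag = first.headers["ETag"]

again = requests.get(url, headers={**signed_headers, "If-None-Match": etag})
assert again.status_code == 304 and not again.content  # cache hit, empty body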
index 0000000000..f77b8fe300 --- /dev/null +++ b/tests/federation/test_federation_out_of_band_membership.py
@@ -0,0 +1,671 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright 2020 The Matrix.org Foundation C.I.C. +# Copyright (C) 2023 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# Originally licensed under the Apache License, Version 2.0: +# <http://www.apache.org/licenses/LICENSE-2.0>. +# +# [This file includes modifications made by New Vector Limited] +# +# + +import logging +import time +import urllib.parse +from http import HTTPStatus +from typing import Any, Callable, Optional, Set, Tuple, TypeVar, Union +from unittest.mock import Mock + +import attr +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.constants import EventContentFields, EventTypes, Membership +from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.events import EventBase, make_event_from_dict +from synapse.events.utils import strip_event +from synapse.federation.federation_base import ( + event_from_pdu_json, +) +from synapse.federation.transport.client import SendJoinResponse +from synapse.http.matrixfederationclient import ( + ByteParser, +) +from synapse.http.types import QueryParams +from synapse.rest import admin +from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.types import JsonDict, MutableStateMap, StateMap +from synapse.types.handlers.sliding_sync import ( + StateValues, +) +from synapse.util import Clock + +from tests import unittest +from tests.utils import test_timeout + +logger = logging.getLogger(__name__) + + +def required_state_json_to_state_map(required_state: Any) -> StateMap[EventBase]: + state_map: MutableStateMap[EventBase] = {} + + # Scrutinize JSON values to ensure it's in the expected format + if isinstance(required_state, list): + for state_event_dict in required_state: + # Yell because we're in a test and this is unexpected + assert isinstance(state_event_dict, dict), ( + "`required_state` should be a list of event dicts" + ) + + event_type = state_event_dict["type"] + event_state_key = state_event_dict["state_key"] + + # Yell because we're in a test and this is unexpected + assert isinstance(event_type, str), ( + "Each event in `required_state` should have a string `type`" + ) + assert isinstance(event_state_key, str), ( + "Each event in `required_state` should have a string `state_key`" + ) + + state_map[(event_type, event_state_key)] = make_event_from_dict( + state_event_dict + ) + else: + # Yell because we're in a test and this is unexpected + raise AssertionError("`required_state` should be a list of event dicts") + + return state_map + + +@attr.s(slots=True, auto_attribs=True) +class RemoteRoomJoinResult: + remote_room_id: str + room_version: RoomVersion + remote_room_creator_user_id: str + local_user1_id: str + local_user1_tok: str + state_map: StateMap[EventBase] + + +class OutOfBandMembershipTests(unittest.FederatingHomeserverTestCase): + """ + Tests to make sure that interactions with out-of-band membership (outliers) works as + expected. 
+ + - invites received over federation, before we join the room + - *rejections* for said invites + + See the "Out-of-band membership events" section in + `docs/development/room-dag-concepts.md` for more information. + """ + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + + sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + + def default_config(self) -> JsonDict: + conf = super().default_config() + # Federation sending is disabled by default in the test environment + # so we need to enable it like this. + conf["federation_sender_instances"] = ["master"] + + return conf + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_http_client = Mock( + # The problem with using `spec=MatrixFederationHttpClient` here is that it + # requires everything to be mocked which is a lot of work that I don't want + # to do when the code only uses a few methods (`get_json` and `put_json`). + ) + return self.setup_test_homeserver( + federation_http_client=self.federation_http_client + ) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + + self.store = self.hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + + def do_sync( + self, sync_body: JsonDict, *, since: Optional[str] = None, tok: str + ) -> Tuple[JsonDict, str]: + """Do a sliding sync request with given body. + + Asserts the request was successful. + + Attributes: + sync_body: The full request body to use + since: Optional since token + tok: Access token to use + + Returns: + A tuple of the response body and the `pos` field. + """ + + sync_path = self.sync_endpoint + if since: + sync_path += f"?pos={since}" + + channel = self.make_request( + method="POST", + path=sync_path, + content=sync_body, + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + return channel.json_body, channel.json_body["pos"] + + def _invite_local_user_to_remote_room_and_join(self) -> RemoteRoomJoinResult: + """ + Helper to reproduce this scenario: + + 1. The remote user invites our local user to a room on their remote server (which + creates an out-of-band invite membership for user1 on our local server). + 2. The local user notices the invite from `/sync`. + 3. The local user joins the room. + 4. The local user can see that they are now joined to the room from `/sync`. 
+ """ + + # Create a local user + local_user1_id = self.register_user("user1", "pass") + local_user1_tok = self.login(local_user1_id, "pass") + + # Create a remote room + room_creator_user_id = f"@remote-user:{self.OTHER_SERVER_NAME}" + remote_room_id = f"!remote-room:{self.OTHER_SERVER_NAME}" + room_version = RoomVersions.V10 + + room_create_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "room_id": remote_room_id, + "sender": room_creator_user_id, + "depth": 1, + "origin_server_ts": 1, + "type": EventTypes.Create, + "state_key": "", + "content": { + # The `ROOM_CREATOR` field could be removed if we used a room + # version > 10 (in favor of relying on `sender`) + EventContentFields.ROOM_CREATOR: room_creator_user_id, + EventContentFields.ROOM_VERSION: room_version.identifier, + }, + "auth_events": [], + "prev_events": [], + } + ), + room_version=room_version, + ) + + creator_membership_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "room_id": remote_room_id, + "sender": room_creator_user_id, + "depth": 2, + "origin_server_ts": 2, + "type": EventTypes.Member, + "state_key": room_creator_user_id, + "content": {"membership": Membership.JOIN}, + "auth_events": [room_create_event.event_id], + "prev_events": [room_create_event.event_id], + } + ), + room_version=room_version, + ) + + # From the remote homeserver, invite user1 on the local homserver + user1_invite_membership_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "room_id": remote_room_id, + "sender": room_creator_user_id, + "depth": 3, + "origin_server_ts": 3, + "type": EventTypes.Member, + "state_key": local_user1_id, + "content": {"membership": Membership.INVITE}, + "auth_events": [ + room_create_event.event_id, + creator_membership_event.event_id, + ], + "prev_events": [creator_membership_event.event_id], + } + ), + room_version=room_version, + ) + channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/invite/{remote_room_id}/{user1_invite_membership_event.event_id}", + content={ + "event": user1_invite_membership_event.get_dict(), + "invite_room_state": [ + strip_event(room_create_event), + ], + "room_version": room_version.identifier, + }, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [(EventTypes.Member, StateValues.WILDCARD)], + "timeline_limit": 0, + } + } + } + + # Sync until the local user1 can see the invite + with test_timeout( + 3, + "Unable to find user1's invite event in the room", + ): + while True: + response_body, _ = self.do_sync(sync_body, tok=local_user1_tok) + if ( + remote_room_id in response_body["rooms"].keys() + # If they have `invite_state` for the room, they are invited + and len( + response_body["rooms"][remote_room_id].get("invite_state", []) + ) + > 0 + ): + break + + # Prevent tight-looping to allow the `test_timeout` to work + time.sleep(0.1) + + user1_join_membership_event_template = make_event_from_dict( + { + "room_id": remote_room_id, + "sender": local_user1_id, + "depth": 4, + "origin_server_ts": 4, + "type": EventTypes.Member, + "state_key": local_user1_id, + "content": {"membership": Membership.JOIN}, + "auth_events": [ + room_create_event.event_id, + user1_invite_membership_event.event_id, + ], + "prev_events": [user1_invite_membership_event.event_id], + }, + room_version=room_version, + ) + + T = TypeVar("T") + + # Mock the remote 
homeserver responding to our HTTP requests + # + # We're going to mock the following endpoints so that user1 can join the remote room: + # - GET /_matrix/federation/v1/make_join/{room_id}/{user_id} + # - PUT /_matrix/federation/v2/send_join/{room_id}/{user_id} + # + async def get_json( + destination: str, + path: str, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Optional[ByteParser[T]] = None, + ) -> Union[JsonDict, T]: + if ( + path + == f"/_matrix/federation/v1/make_join/{urllib.parse.quote_plus(remote_room_id)}/{urllib.parse.quote_plus(local_user1_id)}" + ): + return { + "event": user1_join_membership_event_template.get_pdu_json(), + "room_version": room_version.identifier, + } + + raise NotImplementedError( + "We have not mocked a response for `get_json(...)` for the following endpoint yet: " + + f"{destination}{path}" + ) + + self.federation_http_client.get_json.side_effect = get_json + + # PDU's that hs1 sent to hs2 + collected_pdus_from_hs1_federation_send: Set[str] = set() + + async def put_json( + destination: str, + path: str, + args: Optional[QueryParams] = None, + data: Optional[JsonDict] = None, + json_data_callback: Optional[Callable[[], JsonDict]] = None, + long_retries: bool = False, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + backoff_on_404: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Optional[ByteParser[T]] = None, + backoff_on_all_error_codes: bool = False, + ) -> Union[JsonDict, T, SendJoinResponse]: + if ( + path.startswith( + f"/_matrix/federation/v2/send_join/{urllib.parse.quote_plus(remote_room_id)}/" + ) + and data is not None + and data.get("type") == EventTypes.Member + and data.get("state_key") == local_user1_id + # We're assuming this is a `ByteParser[SendJoinResponse]` + and parser is not None + ): + # As the remote server, we need to sign the event before sending it back + user1_join_membership_event_signed = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server(data), + room_version=room_version, + ) + + # Since they passed in a `parser`, we need to return the type that + # they're expecting instead of just a `JsonDict` + return SendJoinResponse( + auth_events=[ + room_create_event, + user1_invite_membership_event, + ], + state=[ + room_create_event, + creator_membership_event, + user1_invite_membership_event, + ], + event_dict=user1_join_membership_event_signed.get_pdu_json(), + event=user1_join_membership_event_signed, + members_omitted=False, + servers_in_room=[ + self.OTHER_SERVER_NAME, + ], + ) + + if path.startswith("/_matrix/federation/v1/send/") and data is not None: + for pdu in data.get("pdus", []): + event = event_from_pdu_json(pdu, room_version) + collected_pdus_from_hs1_federation_send.add(event.event_id) + + # Just acknowledge everything hs1 is trying to send hs2 + return { + event_from_pdu_json(pdu, room_version).event_id: {} + for pdu in data.get("pdus", []) + } + + raise NotImplementedError( + "We have not mocked a response for `put_json(...)` for the following endpoint yet: " + + f"{destination}{path} with the following body data: {data}" + ) + + self.federation_http_client.put_json.side_effect = put_json + + # User1 joins the room + self.helper.join(remote_room_id, local_user1_id, tok=local_user1_tok) + + # Reset the mocks now that user1 has joined the room + self.federation_http_client.get_json.side_effect = None + 
self.federation_http_client.put_json.side_effect = None + + # Sync until the local user1 can see that they are now joined to the room + with test_timeout( + 3, + "Unable to find user1's join event in the room", + ): + while True: + response_body, _ = self.do_sync(sync_body, tok=local_user1_tok) + if remote_room_id in response_body["rooms"].keys(): + required_state_map = required_state_json_to_state_map( + response_body["rooms"][remote_room_id]["required_state"] + ) + if ( + required_state_map.get((EventTypes.Member, local_user1_id)) + is not None + ): + break + + # Prevent tight-looping to allow the `test_timeout` to work + time.sleep(0.1) + + # Nothing needs to be sent from hs1 to hs2 since we already let the other + # homeserver know by doing the `/make_join` and `/send_join` dance. + self.assertIncludes( + collected_pdus_from_hs1_federation_send, + set(), + exact=True, + message="Didn't expect any events to be sent from hs1 over federation to hs2", + ) + + return RemoteRoomJoinResult( + remote_room_id=remote_room_id, + room_version=room_version, + remote_room_creator_user_id=room_creator_user_id, + local_user1_id=local_user1_id, + local_user1_tok=local_user1_tok, + state_map=self.get_success( + self.storage_controllers.state.get_current_state(remote_room_id) + ), + ) + + def test_can_join_from_out_of_band_invite(self) -> None: + """ + Test to make sure that we can join a room that we were invited to over + federation; even if our server has never participated in the room before. + """ + self._invite_local_user_to_remote_room_and_join() + + @parameterized.expand( + [("accept invite", Membership.JOIN), ("reject invite", Membership.LEAVE)] + ) + def test_can_x_from_out_of_band_invite_after_we_are_already_participating_in_the_room( + self, _test_description: str, membership_action: str + ) -> None: + """ + Test to make sure that we can do either a) join the room (accept the invite) or + b) reject the invite after being invited to over federation; even if we are + already participating in the room. + + This is a regression test to make sure we stress the scenario where even though + we are already participating in the room, local users can still react to invites + regardless of whether the remote server has told us about the invite event (via + a federation `/send` transaction) and we have de-outliered the invite event. + Previously, we would mistakenly throw an error saying the user wasn't in the + room when they tried to join or reject the invite. 
+ """ + remote_room_join_result = self._invite_local_user_to_remote_room_and_join() + remote_room_id = remote_room_join_result.remote_room_id + room_version = remote_room_join_result.room_version + + # Create another local user + local_user2_id = self.register_user("user2", "pass") + local_user2_tok = self.login(local_user2_id, "pass") + + T = TypeVar("T") + + # PDU's that hs1 sent to hs2 + collected_pdus_from_hs1_federation_send: Set[str] = set() + + async def put_json( + destination: str, + path: str, + args: Optional[QueryParams] = None, + data: Optional[JsonDict] = None, + json_data_callback: Optional[Callable[[], JsonDict]] = None, + long_retries: bool = False, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + backoff_on_404: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Optional[ByteParser[T]] = None, + backoff_on_all_error_codes: bool = False, + ) -> Union[JsonDict, T]: + if path.startswith("/_matrix/federation/v1/send/") and data is not None: + for pdu in data.get("pdus", []): + event = event_from_pdu_json(pdu, room_version) + collected_pdus_from_hs1_federation_send.add(event.event_id) + + # Just acknowledge everything hs1 is trying to send hs2 + return { + event_from_pdu_json(pdu, room_version).event_id: {} + for pdu in data.get("pdus", []) + } + + raise NotImplementedError( + "We have not mocked a response for `put_json(...)` for the following endpoint yet: " + + f"{destination}{path} with the following body data: {data}" + ) + + self.federation_http_client.put_json.side_effect = put_json + + # From the remote homeserver, invite user2 on the local homserver + user2_invite_membership_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "room_id": remote_room_id, + "sender": remote_room_join_result.remote_room_creator_user_id, + "depth": 5, + "origin_server_ts": 5, + "type": EventTypes.Member, + "state_key": local_user2_id, + "content": {"membership": Membership.INVITE}, + "auth_events": [ + remote_room_join_result.state_map[ + (EventTypes.Create, "") + ].event_id, + remote_room_join_result.state_map[ + ( + EventTypes.Member, + remote_room_join_result.remote_room_creator_user_id, + ) + ].event_id, + ], + "prev_events": [ + remote_room_join_result.state_map[ + (EventTypes.Member, remote_room_join_result.local_user1_id) + ].event_id + ], + } + ), + room_version=room_version, + ) + channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/invite/{remote_room_id}/{user2_invite_membership_event.event_id}", + content={ + "event": user2_invite_membership_event.get_dict(), + "invite_room_state": [ + strip_event( + remote_room_join_result.state_map[(EventTypes.Create, "")] + ), + ], + "room_version": room_version.identifier, + }, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [(EventTypes.Member, StateValues.WILDCARD)], + "timeline_limit": 0, + } + } + } + + # Sync until the local user2 can see the invite + with test_timeout( + 3, + "Unable to find user2's invite event in the room", + ): + while True: + response_body, _ = self.do_sync(sync_body, tok=local_user2_tok) + if ( + remote_room_id in response_body["rooms"].keys() + # If they have `invite_state` for the room, they are invited + and len( + response_body["rooms"][remote_room_id].get("invite_state", []) + ) + > 0 + ): + break + + # Prevent tight-looping to allow the `test_timeout` to work + time.sleep(0.1) + + if 
membership_action == Membership.JOIN: + # User2 joins the room + join_event = self.helper.join( + remote_room_join_result.remote_room_id, + local_user2_id, + tok=local_user2_tok, + ) + expected_pdu_event_id = join_event["event_id"] + elif membership_action == Membership.LEAVE: + # User2 rejects the invite + leave_event = self.helper.leave( + remote_room_join_result.remote_room_id, + local_user2_id, + tok=local_user2_tok, + ) + expected_pdu_event_id = leave_event["event_id"] + else: + raise NotImplementedError( + "This test does not support this membership action yet" + ) + + # Sync until the local user2 can see their new membership in the room + with test_timeout( + 3, + "Unable to find user2's new membership event in the room", + ): + while True: + response_body, _ = self.do_sync(sync_body, tok=local_user2_tok) + if membership_action == Membership.JOIN: + if remote_room_id in response_body["rooms"].keys(): + required_state_map = required_state_json_to_state_map( + response_body["rooms"][remote_room_id]["required_state"] + ) + if ( + required_state_map.get((EventTypes.Member, local_user2_id)) + is not None + ): + break + elif membership_action == Membership.LEAVE: + if remote_room_id not in response_body["rooms"].keys(): + break + else: + raise NotImplementedError( + "This test does not support this membership action yet" + ) + + # Prevent tight-looping to allow the `test_timeout` to work + time.sleep(0.1) + + # Make sure that we let hs2 know about the new membership event + self.assertIncludes( + collected_pdus_from_hs1_federation_send, + {expected_pdu_event_id}, + exact=True, + message="Expected to find the event ID of the user2 membership to be sent from hs1 over federation to hs2", + ) diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
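The `get_json`/`put_json` mocks above stand in for the remote side of the standard join dance; schematically (paths from the test, bodies abridged):

# 1. GET /_matrix/federation/v1/make_join/{room_id}/{user_id}
#      -> {"event": <unsigned join template>, "room_version": "10"}
# 2. The joining server completes and signs the template, then:
#    PUT /_matrix/federation/v2/send_join/{room_id}/{event_id}
#      -> SendJoinResponse(event=<signed join>, state=..., auth_events=...)
# 3. Later PDUs travel in ordinary /_matrix/federation/v1/send/ transactions,
#    which the test collects in collected_pdus_from_hs1_federation_send.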
index 6a8887fe74..cd906bbbc7 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py
@@ -34,6 +34,7 @@ from synapse.handlers.device import DeviceHandler
 from synapse.rest import admin
 from synapse.rest.client import login
 from synapse.server import HomeServer
+from synapse.storage.databases.main.events_worker import EventMetadata
 from synapse.types import JsonDict, ReadReceipt
 from synapse.util import Clock
 
@@ -55,12 +56,15 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
             federation_transport_client=self.federation_transport_client,
         )
 
-        hs.get_storage_controllers().state.get_current_hosts_in_room = AsyncMock(  # type: ignore[method-assign]
+        self.main_store = hs.get_datastores().main
+        self.state_controller = hs.get_storage_controllers().state
+
+        self.state_controller.get_current_hosts_in_room = AsyncMock(  # type: ignore[method-assign]
             return_value={"test", "host2"}
         )
 
-        hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = (  # type: ignore[method-assign]
-            hs.get_storage_controllers().state.get_current_hosts_in_room
+        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = (  # type: ignore[method-assign]
+            self.state_controller.get_current_hosts_in_room
         )
 
         return hs
@@ -185,12 +189,15 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
             ],
         )
 
-    def test_send_receipts_with_backoff(self) -> None:
-        """Send two receipts in quick succession; the second should be flushed, but
-        only after 20ms"""
+    def test_send_receipts_with_backoff_small_room(self) -> None:
+        """Read receipts in small rooms should not be delayed"""
         mock_send_transaction = self.federation_transport_client.send_transaction
         mock_send_transaction.return_value = {}
 
+        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock(  # type: ignore[method-assign]
+            return_value={"test", "host2"}
+        )
+
         sender = self.hs.get_federation_sender()
         receipt = ReadReceipt(
             "room_id",
@@ -206,47 +213,104 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
 
         # expect a call to send_transaction
         mock_send_transaction.assert_called_once()
-        json_cb = mock_send_transaction.call_args[0][1]
-        data = json_cb()
-        self.assertEqual(
-            data["edus"],
-            [
-                {
-                    "edu_type": EduTypes.RECEIPT,
-                    "content": {
-                        "room_id": {
-                            "m.read": {
-                                "user_id": {
-                                    "event_ids": ["event_id"],
-                                    "data": {"ts": 1234},
-                                }
-                            }
-                        }
-                    },
-                }
-            ],
+        self._assert_edu_in_call(mock_send_transaction.call_args[0][1])
+
+    def test_send_receipts_with_backoff_recent_event(self) -> None:
+        """A read receipt for a recent message should not be delayed"""
+        mock_send_transaction = self.federation_transport_client.send_transaction
+        mock_send_transaction.return_value = {}
+
+        # Pretend this is a big room
+        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock(  # type: ignore[method-assign]
+            return_value={"test"} | {f"host{i}" for i in range(20)}
         )
+
+        self.main_store.get_metadata_for_event = AsyncMock(
+            return_value=EventMetadata(
+                received_ts=self.clock.time_msec(),
+                sender="@test:test",
+            )
+        )
+
+        sender = self.hs.get_federation_sender()
+        receipt = ReadReceipt(
+            "room_id",
+            "m.read",
+            "user_id",
+            ["event_id"],
+            thread_id=None,
+            data={"ts": 1234},
+        )
+        self.get_success(sender.send_read_receipt(receipt))
+
+        self.pump()
+
+        # expect a call to send_transaction for each host
+        self.assertEqual(mock_send_transaction.call_count, 20)
+        self._assert_edu_in_call(mock_send_transaction.call_args.args[1])
+
         mock_send_transaction.reset_mock()
 
-        # send the second RR
+    def test_send_receipts_with_backoff_sender(self) -> None:
+        """A read receipt for a message is not delayed to the message's sender,
+        but is delayed to everyone else"""
+        mock_send_transaction = self.federation_transport_client.send_transaction
+        mock_send_transaction.return_value = {}
+
+        # Pretend this is a big room
+        self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock(  # type: ignore[method-assign]
+            return_value={"test"} | {f"host{i}" for i in range(20)}
+        )
+
+        self.main_store.get_metadata_for_event = AsyncMock(
+            return_value=EventMetadata(
+                received_ts=self.clock.time_msec() - 5 * 60_000,
+                sender="@test:host1",
+            )
+        )
+
+        sender = self.hs.get_federation_sender()
         receipt = ReadReceipt(
             "room_id",
             "m.read",
             "user_id",
-            ["other_id"],
+            ["event_id"],
             thread_id=None,
             data={"ts": 1234},
         )
-        self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))
+        self.get_success(sender.send_read_receipt(receipt))
+
         self.pump()
-        mock_send_transaction.assert_not_called()
 
-        self.reactor.advance(19)
-        mock_send_transaction.assert_not_called()
+        # First, expect a call to send_transaction for the sending host
+        mock_send_transaction.assert_called()
 
-        self.reactor.advance(10)
-        mock_send_transaction.assert_called_once()
-        json_cb = mock_send_transaction.call_args[0][1]
+        transaction = mock_send_transaction.call_args_list[0].args[0]
+        self.assertEqual(transaction.destination, "host1")
+        self._assert_edu_in_call(mock_send_transaction.call_args_list[0].args[1])
+
+        # We also expect a call to one of the other hosts, as the first
+        # destination to wake up.
+        self.assertEqual(mock_send_transaction.call_count, 2)
+        self._assert_edu_in_call(mock_send_transaction.call_args.args[1])
+
+        mock_send_transaction.reset_mock()
+
+        # We now expect to see 18 more transactions to the remaining hosts
+        # periodically.
+        for _ in range(18):
+            self.reactor.advance(
+                1.0
+                / self.hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
+            )
+
+            mock_send_transaction.assert_called_once()
+            self._assert_edu_in_call(mock_send_transaction.call_args.args[1])
+            mock_send_transaction.reset_mock()
+
+    def _assert_edu_in_call(self, json_cb: Callable[[], JsonDict]) -> None:
+        """Assert that the given `json_cb` from a `send_transaction` has a
+        receipt in it."""
         data = json_cb()
         self.assertEqual(
             data["edus"],
@@ -257,7 +321,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase):
                     "room_id": {
                         "m.read": {
                             "user_id": {
-                                "event_ids": ["other_id"],
+                                "event_ids": ["event_id"],
                                 "data": {"ts": 1234},
                             }
                         }
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 88261450b1..42dc844734 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py
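The StripUnsignedFromEventsTestCase added at the end of this file pins down which `unsigned` keys survive `event_from_pdu_json`. As a rough pure-Python sketch of the allowlist rule the tests exercise (the helper and the exact allowlist here are illustrative, not Synapse's implementation; the tests below only establish that `age` is always kept and `invite_room_state` is kept solely for `m.room.member` events):

    from typing import Any, Dict, Set

    # Assumed partial allowlist, inferred from the tests below.
    ALWAYS_KEPT: Set[str] = {"age"}
    KEPT_PER_EVENT_TYPE = {"m.room.member": {"invite_room_state"}}

    def strip_unsigned(pdu: Dict[str, Any]) -> Dict[str, Any]:
        """Drop unsigned keys a remote server is not allowed to set."""
        allowed = ALWAYS_KEPT | KEPT_PER_EVENT_TYPE.get(pdu.get("type", ""), set())
        pdu["unsigned"] = {
            k: v for k, v in pdu.get("unsigned", {}).items() if k in allowed
        }
        return pdu

    event = {"type": "m.room.power_levels", "unsigned": {"age": 14, "invite_room_state": []}}
    assert strip_unsigned(event)["unsigned"] == {"age": 14}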
@@ -20,14 +20,21 @@ # import logging from http import HTTPStatus +from typing import Optional, Union +from unittest.mock import Mock from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.api.constants import EventTypes, Membership +from synapse.api.errors import FederationError +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.events import EventBase, make_event_from_dict +from synapse.federation.federation_base import event_from_pdu_json +from synapse.http.types import QueryParams +from synapse.logging.context import LoggingContext from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer @@ -85,6 +92,163 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): self.assertEqual(500, channel.code, channel.result) +def _create_acl_event(content: JsonDict) -> EventBase: + return make_event_from_dict( + { + "room_id": "!a:b", + "event_id": "$a:b", + "type": "m.room.server_acls", + "sender": "@a:b", + "content": content, + } + ) + + +class MessageAcceptTests(unittest.FederatingHomeserverTestCase): + """ + Tests to make sure that we don't accept flawed events from federation (incoming). + """ + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.http_client = Mock() + return self.setup_test_homeserver(federation_http_client=self.http_client) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + + self.store = self.hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() + self.federation_event_handler = self.hs.get_federation_event_handler() + + # Create a local room + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + self.room_id = self.helper.create_room_as( + user1_id, tok=user1_tok, is_public=True + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(self.room_id) + ) + + # Figure out what the forward extremities in the room are (the most recent + # events that aren't tied into the DAG) + forward_extremity_event_ids = self.get_success( + self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) + ) + + # Join a remote user to the room that will attempt to send bad events + self.remote_bad_user_id = f"@baduser:{self.OTHER_SERVER_NAME}" + self.remote_bad_user_join_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "room_id": self.room_id, + "sender": self.remote_bad_user_id, + "state_key": self.remote_bad_user_id, + "depth": 1000, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.JOIN}, + "auth_events": [ + state_map[(EventTypes.Create, "")].event_id, + state_map[(EventTypes.JoinRules, "")].event_id, + ], + "prev_events": list(forward_extremity_event_ids), + } + ), + room_version=RoomVersions.V10, + ) + + # Send the join, it should return None (which is not an error) + self.assertEqual( + self.get_success( + self.federation_event_handler.on_receive_pdu( + self.OTHER_SERVER_NAME, self.remote_bad_user_join_event + ) + ), + None, + ) + + # Make sure we actually joined the room + self.assertEqual( + 
self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)), + {self.remote_bad_user_join_event.event_id}, + ) + + def test_cant_hide_direct_ancestors(self) -> None: + """ + If you send a message, you must be able to provide the direct + prev_events that said event references. + """ + + async def post_json( + destination: str, + path: str, + data: Optional[JsonDict] = None, + long_retries: bool = False, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + args: Optional[QueryParams] = None, + ) -> Union[JsonDict, list]: + # If it asks us for new missing events, give them NOTHING + if path.startswith("/_matrix/federation/v1/get_missing_events/"): + return {"events": []} + return {} + + self.http_client.post_json = post_json + + # Figure out what the forward extremities in the room are (the most recent + # events that aren't tied into the DAG) + forward_extremity_event_ids = self.get_success( + self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) + ) + + # Now lie about an event's prev_events + lying_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "room_id": self.room_id, + "sender": self.remote_bad_user_id, + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.message", + "content": {"body": "hewwo?"}, + "auth_events": [], + "prev_events": ["$missing_prev_event"] + + list(forward_extremity_event_ids), + } + ), + room_version=RoomVersions.V10, + ) + + with LoggingContext("test-context"): + failure = self.get_failure( + self.federation_event_handler.on_receive_pdu( + self.OTHER_SERVER_NAME, lying_event + ), + FederationError, + ) + + # on_receive_pdu should throw an error + self.assertEqual( + failure.value.args[0], + ( + "ERROR 403: Your server isn't divulging details about prev_events " + "referenced in this event." + ), + ) + + # Make sure the invalid event isn't there + extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) + self.assertEqual(extrem, {self.remote_bad_user_join_event.event_id}) + + class ServerACLsTestCase(unittest.TestCase): def test_blocked_server(self) -> None: e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]}) @@ -355,13 +519,76 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): # is probably sufficient to reassure that the bucket is updated. -def _create_acl_event(content: JsonDict) -> EventBase: - return make_event_from_dict( - { - "room_id": "!a:b", - "event_id": "$a:b", - "type": "m.room.server_acls", - "sender": "@a:b", - "content": content, +class StripUnsignedFromEventsTestCase(unittest.TestCase): + """ + Test to make sure that we handle the raw JSON events from federation carefully and + strip anything that shouldn't be there. 
+ """ + + def test_strip_unauthorized_unsigned_values(self) -> None: + event1 = { + "sender": "@baduser:test.serv", + "state_key": "@baduser:test.serv", + "event_id": "$event1:test.serv", + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.member", + "origin": "test.servx", + "content": {"membership": "join"}, + "auth_events": [], + "unsigned": {"malicious garbage": "hackz", "more warez": "more hackz"}, } - ) + filtered_event = event_from_pdu_json(event1, RoomVersions.V1) + # Make sure unauthorized fields are stripped from unsigned + self.assertNotIn("more warez", filtered_event.unsigned) + + def test_strip_event_maintains_allowed_fields(self) -> None: + event2 = { + "sender": "@baduser:test.serv", + "state_key": "@baduser:test.serv", + "event_id": "$event2:test.serv", + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.member", + "origin": "test.servx", + "auth_events": [], + "content": {"membership": "join"}, + "unsigned": { + "malicious garbage": "hackz", + "more warez": "more hackz", + "age": 14, + "invite_room_state": [], + }, + } + + filtered_event2 = event_from_pdu_json(event2, RoomVersions.V1) + self.assertIn("age", filtered_event2.unsigned) + self.assertEqual(14, filtered_event2.unsigned["age"]) + self.assertNotIn("more warez", filtered_event2.unsigned) + # Invite_room_state is allowed in events of type m.room.member + self.assertIn("invite_room_state", filtered_event2.unsigned) + self.assertEqual([], filtered_event2.unsigned["invite_room_state"]) + + def test_strip_event_removes_fields_based_on_event_type(self) -> None: + event3 = { + "sender": "@baduser:test.serv", + "state_key": "@baduser:test.serv", + "event_id": "$event3:test.serv", + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.power_levels", + "origin": "test.servx", + "content": {}, + "auth_events": [], + "unsigned": { + "malicious garbage": "hackz", + "more warez": "more hackz", + "age": 14, + "invite_room_state": [], + }, + } + filtered_event3 = event_from_pdu_json(event3, RoomVersions.V1) + self.assertIn("age", filtered_event3.unsigned) + # Invite_room_state field is only permitted in event type m.room.member + self.assertNotIn("invite_room_state", filtered_event3.unsigned) + self.assertNotIn("more warez", filtered_event3.unsigned) diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py
index 9ff853a83d..c5bff468e2 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py
@@ -260,7 +260,6 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         self.assertEqual(args[0]["name"], self.user2)
         self.assertIn("displayname", args[0])
         self.assertIn("avatar_url", args[0])
-        self.assertIn("threepids", args[0])
         self.assertIn("external_ids", args[0])
         self.assertIn("creation_ts", args[0])
 
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 1eec0d43b7..1db630e9e4 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py
@@ -1165,12 +1165,23 @@ class ApplicationServicesHandlerOtkCountsTestCase(unittest.HomeserverTestCase):
         self.hs.get_datastores().main.services_cache = [self._service]
 
         # Register some appservice users
-        self._sender_user, self._sender_device = self.register_appservice_user(
+        user_id, device_id = self.register_appservice_user(
             "as.sender", self._service_token
         )
-        self._namespaced_user, self._namespaced_device = self.register_appservice_user(
+        # With MSC4190 enabled, there will not be a device created
+        # during AS registration. However MSC4190 is not enabled
+        # in this test. It may become the default behaviour in the
+        # future, in which case this test will need to be updated.
+        assert device_id is not None
+        self._sender_user = user_id
+        self._sender_device = device_id
+
+        user_id, device_id = self.register_appservice_user(
             "_as_user1", self._service_token
         )
+        assert device_id is not None
+        self._namespaced_user = user_id
+        self._namespaced_device = device_id
 
         # Register a real user as well.
         self._real_user = self.register_user("real.user", "meow")
diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py
deleted file mode 100644
index f41f7d36ad..0000000000 --- a/tests/handlers/test_cas.py +++ /dev/null
@@ -1,239 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2020 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# -from typing import Any, Dict -from unittest.mock import AsyncMock, Mock - -from twisted.test.proto_helpers import MemoryReactor - -from synapse.handlers.cas import CasResponse -from synapse.server import HomeServer -from synapse.util import Clock - -from tests.unittest import HomeserverTestCase, override_config - -# These are a few constants that are used as config parameters in the tests. -BASE_URL = "https://synapse/" -SERVER_URL = "https://issuer/" - - -class CasHandlerTestCase(HomeserverTestCase): - def default_config(self) -> Dict[str, Any]: - config = super().default_config() - config["public_baseurl"] = BASE_URL - cas_config = { - "enabled": True, - "server_url": SERVER_URL, - "service_url": BASE_URL, - } - - # Update this config with what's in the default config so that - # override_config works as expected. - cas_config.update(config.get("cas_config", {})) - config["cas_config"] = cas_config - - return config - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver() - - self.handler = hs.get_cas_handler() - - # Reduce the number of attempts when generating MXIDs. - sso_handler = hs.get_sso_handler() - sso_handler._MAP_USERNAME_RETRIES = 3 - - return hs - - def test_map_cas_user_to_user(self) -> None: - """Ensure that mapping the CAS user returned from a provider to an MXID works properly.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - cas_response = CasResponse("test_user", {}) - request = _mock_request() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "cas", - request, - "redirect_uri", - None, - new_user=True, - auth_provider_session_id=None, - ) - - def test_map_cas_user_to_existing_user(self) -> None: - """Existing users can log in with CAS account.""" - store = self.hs.get_datastores().main - self.get_success( - store.register_user(user_id="@test_user:test", password_hash=None) - ) - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - # Map a user via SSO. 
- cas_response = CasResponse("test_user", {}) - request = _mock_request() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "cas", - request, - "redirect_uri", - None, - new_user=False, - auth_provider_session_id=None, - ) - - # Subsequent calls should map to the same mxid. - auth_handler.complete_sso_login.reset_mock() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "cas", - request, - "redirect_uri", - None, - new_user=False, - auth_provider_session_id=None, - ) - - def test_map_cas_user_to_invalid_localpart(self) -> None: - """CAS automaps invalid characters to base-64 encoding.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - cas_response = CasResponse("föö", {}) - request = _mock_request() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@f=c3=b6=c3=b6:test", - "cas", - request, - "redirect_uri", - None, - new_user=True, - auth_provider_session_id=None, - ) - - @override_config( - { - "cas_config": { - "required_attributes": {"userGroup": "staff", "department": None} - } - } - ) - def test_required_attributes(self) -> None: - """The required attributes must be met from the CAS response.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - # The response doesn't have the proper userGroup or department. - cas_response = CasResponse("test_user", {}) - request = _mock_request() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - auth_handler.complete_sso_login.assert_not_called() - - # The response doesn't have any department. - cas_response = CasResponse("test_user", {"userGroup": ["staff"]}) - request.reset_mock() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - auth_handler.complete_sso_login.assert_not_called() - - # Add the proper attributes and it should succeed. 
- cas_response = CasResponse( - "test_user", {"userGroup": ["staff", "admin"], "department": ["sales"]} - ) - request.reset_mock() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "cas", - request, - "redirect_uri", - None, - new_user=True, - auth_provider_session_id=None, - ) - - @override_config({"cas_config": {"enable_registration": False}}) - def test_map_cas_user_does_not_register_new_user(self) -> None: - """Ensures new users are not registered if the enabled registration flag is disabled.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - cas_response = CasResponse("test_user", {}) - request = _mock_request() - self.get_success( - self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") - ) - - # check that the auth handler was not called as expected - auth_handler.complete_sso_login.assert_not_called() - - -def _mock_request() -> Mock: - """Returns a mock which will stand in as a SynapseRequest""" - mock = Mock( - spec=[ - "finish", - "getClientAddress", - "getHeader", - "setHeader", - "setResponseCode", - "write", - ] - ) - # `_disconnected` musn't be another `Mock`, otherwise it will be truthy. - mock._disconnected = False - return mock diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 4a3e36ffde..b7058d8002 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py
@@ -587,6 +587,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase):
         self.room_list_handler = hs.get_room_list_handler()
         self.directory_handler = hs.get_directory_handler()
 
+    @unittest.override_config({"room_list_publication_rules": [{"action": "allow"}]})
     def test_disabling_room_list(self) -> None:
         self.room_list_handler.enable_room_list_search = True
         self.directory_handler.enable_room_list_search = True
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 8a3dfdcf75..70fc4263e7 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py
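Several of the new one-time-key tests below assert that claims are served in upload order (oldest upload first) rather than by key ID. A minimal stand-alone model of that ordering, with hypothetical names (Synapse enforces this in SQL against the `e2e_one_time_keys_json` table named in the tests, not in Python):

    import heapq
    from typing import Dict, List, Tuple

    def claim_otks(uploaded: List[Tuple[float, str, str]], count: int) -> Dict[str, str]:
        """uploaded: (upload_ts, key_id, key) tuples; return the `count` oldest keys."""
        oldest = heapq.nsmallest(count, uploaded, key=lambda entry: entry[0])
        return {key_id: key for _, key_id, key in oldest}

    # k1 was uploaded first, so it is claimed first despite its higher key ID.
    uploaded = [(2.0, "alg1:k0", "key0"), (1.0, "alg1:k1", "key1")]
    assert claim_otks(uploaded, 1) == {"alg1:k1": "key1"}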
@@ -19,6 +19,7 @@ # [This file includes modifications made by New Vector Limited] # # +import time from typing import Dict, Iterable from unittest import mock @@ -151,18 +152,30 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def test_claim_one_time_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" - keys = {"alg1:k1": "key1"} - res = self.get_success( self.handler.upload_keys_for_user( - local_user, device_id, {"one_time_keys": keys} + local_user, device_id, {"one_time_keys": {"alg1:k1": "key1"}} ) ) self.assertDictEqual( res, {"one_time_key_counts": {"alg1": 1, "signed_curve25519": 0}} ) - res2 = self.get_success( + # Keys should be returned in the order they were uploaded. To test, advance time + # a little, then upload a second key with an earlier key ID; it should get + # returned second. + self.reactor.advance(1) + res = self.get_success( + self.handler.upload_keys_for_user( + local_user, device_id, {"one_time_keys": {"alg1:k0": "key0"}} + ) + ) + self.assertDictEqual( + res, {"one_time_key_counts": {"alg1": 2, "signed_curve25519": 0}} + ) + + # now claim both keys back. They should be in the same order + res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, self.requester, @@ -171,12 +184,27 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) ) self.assertEqual( - res2, + res, { "failures": {}, "one_time_keys": {local_user: {device_id: {"alg1:k1": "key1"}}}, }, ) + res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id: {"alg1": 1}}}, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + self.assertEqual( + res, + { + "failures": {}, + "one_time_keys": {local_user: {device_id: {"alg1:k0": "key0"}}}, + }, + ) def test_claim_one_time_key_bulk(self) -> None: """Like test_claim_one_time_key but claims multiple keys in one handler call.""" @@ -336,6 +364,47 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): counts_by_alg, expected_counts_by_alg, f"{user_id}:{device_id}" ) + def test_claim_one_time_key_bulk_ordering(self) -> None: + """Keys returned by the bulk claim call should be returned in the correct order""" + + # Alice has lots of keys, uploaded in a specific order + alice = f"@alice:{self.hs.hostname}" + alice_dev = "alice_dev_1" + + self.get_success( + self.handler.upload_keys_for_user( + alice, + alice_dev, + {"one_time_keys": {"alg1:k20": 20, "alg1:k21": 21, "alg1:k22": 22}}, + ) + ) + # Advance time by 1s, to ensure that there is a difference in upload time. + self.reactor.advance(1) + self.get_success( + self.handler.upload_keys_for_user( + alice, + alice_dev, + {"one_time_keys": {"alg1:k10": 10, "alg1:k11": 11, "alg1:k12": 12}}, + ) + ) + + # Now claim some, and check we get the right ones. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {alice: {alice_dev: {"alg1": 2}}}, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + # We should get the first-uploaded keys, even though they have later key ids. + # We should get a random set of two of k20, k21, k22. 
+ self.assertEqual(claim_res["failures"], {}) + claimed_keys = claim_res["one_time_keys"]["@alice:test"]["alice_dev_1"] + self.assertEqual(len(claimed_keys), 2) + for key_id in claimed_keys.keys(): + self.assertIn(key_id, ["alg1:k20", "alg1:k21", "alg1:k22"]) + def test_fallback_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" @@ -1758,3 +1827,222 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): ) self.assertIs(exists, True) self.assertIs(replaceable_without_uia, False) + + def test_delete_old_one_time_keys(self) -> None: + """Test the db migration that clears out old OTKs""" + + # We upload two sets of keys, one just over a week ago, and one just less than + # a week ago. Each batch contains some keys that match the deletion pattern + # (key IDs of 6 chars), and some that do not. + # + # Finally, set the scheduled task going, and check what gets deleted. + + user_id = "@user000:" + self.hs.hostname + device_id = "xyz" + + # The scheduled task should be for "now" in real, wallclock time, so + # set the test reactor to just over a week ago. + self.reactor.advance(time.time() - 7.5 * 24 * 3600) + + # Upload some keys + self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + { + "one_time_keys": { + # some keys to delete + "alg1:AAAAAA": "key1", + "alg2:AAAAAB": {"key": "key2", "signatures": {"k1": "sig1"}}, + # A key to *not* delete + "alg2:AAAAAAAAAA": {"key": "key3"}, + } + }, + ) + ) + + # A day passes + self.reactor.advance(24 * 3600) + + # Upload some more keys + self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + { + "one_time_keys": { + # some keys which match the pattern + "alg1:BAAAAA": "key1", + "alg2:BAAAAB": {"key": "key2", "signatures": {"k1": "sig1"}}, + # A key to *not* delete + "alg2:BAAAAAAAAA": {"key": "key3"}, + } + }, + ) + ) + + # The rest of the week passes, which should set the scheduled task going. + self.reactor.advance(6.5 * 24 * 3600) + + # Check what we're left with in the database + remaining_key_ids = { + row[0] + for row in self.get_success( + self.handler.store.db_pool.simple_select_list( + "e2e_one_time_keys_json", None, ["key_id"] + ) + ) + } + self.assertEqual( + remaining_key_ids, {"AAAAAAAAAA", "BAAAAA", "BAAAAB", "BAAAAAAAAA"} + ) + + @override_config( + { + "experimental_features": { + "msc4263_limit_key_queries_to_users_who_share_rooms": True + } + } + ) + def test_query_devices_remote_restricted_not_in_shared_room(self) -> None: + """Tests that querying keys for a remote user that we don't share a room + with returns nothing. + """ + + remote_user_id = "@test:other" + local_user_id = "@test:test" + + # Do *not* pretend we're sharing a room with the user we're querying. 
+ + remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" + remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" + + self.hs.get_federation_client().query_client_keys = mock.AsyncMock( # type: ignore[method-assign] + return_value={ + "device_keys": {remote_user_id: {}}, + "master_keys": { + remote_user_id: { + "user_id": remote_user_id, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + }, + }, + "self_signing_keys": { + remote_user_id: { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + + remote_self_signing_key: remote_self_signing_key + }, + } + }, + } + ) + + e2e_handler = self.hs.get_e2e_keys_handler() + + query_result = self.get_success( + e2e_handler.query_devices( + { + "device_keys": {remote_user_id: []}, + }, + timeout=10, + from_user_id=local_user_id, + from_device_id="some_device_id", + ) + ) + + self.assertEqual( + query_result, + { + "device_keys": {}, + "failures": {}, + "master_keys": {}, + "self_signing_keys": {}, + "user_signing_keys": {}, + }, + ) + + @override_config( + { + "experimental_features": { + "msc4263_limit_key_queries_to_users_who_share_rooms": True + } + } + ) + def test_query_devices_remote_restricted_in_shared_room(self) -> None: + """Tests that querying keys for a remote user that we share a room + with returns the cross signing keys correctly. + """ + + remote_user_id = "@test:other" + local_user_id = "@test:test" + + # Pretend we're sharing a room with the user we're querying. If not, + # `query_devices` will filter out the user ID and `_query_devices_for_destination` + # will return early. + self.store.do_users_share_a_room_joined_or_invited = mock.AsyncMock( # type: ignore[method-assign] + return_value=[remote_user_id] + ) + self.store.get_rooms_for_user = mock.AsyncMock(return_value={"some_room_id"}) + + remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" + remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" + + self.hs.get_federation_client().query_user_devices = mock.AsyncMock( # type: ignore[method-assign] + return_value={ + "user_id": remote_user_id, + "stream_id": 1, + "devices": [], + "master_key": { + "user_id": remote_user_id, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + }, + "self_signing_key": { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + remote_self_signing_key: remote_self_signing_key + }, + }, + } + ) + + e2e_handler = self.hs.get_e2e_keys_handler() + + query_result = self.get_success( + e2e_handler.query_devices( + { + "device_keys": {remote_user_id: []}, + }, + timeout=10, + from_user_id=local_user_id, + from_device_id="some_device_id", + ) + ) + + self.assertEqual(query_result["failures"], {}) + self.assertEqual( + query_result["master_keys"], + { + remote_user_id: { + "user_id": remote_user_id, + "usage": ["master"], + "keys": {"ed25519:" + remote_master_key: remote_master_key}, + } + }, + ) + self.assertEqual( + query_result["self_signing_keys"], + { + remote_user_id: { + "user_id": remote_user_id, + "usage": ["self_signing"], + "keys": { + "ed25519:" + remote_self_signing_key: remote_self_signing_key + }, + } + }, + ) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 3fe5b0a1b4..b64a8a86a2 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py
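The changes below are mostly mechanical: stacked `patch.object` calls are regrouped using parenthesised context managers, which Python supports from 3.10 onwards. Both spellings are equivalent; a self-contained illustration:

    from unittest.mock import patch

    class Greeter:
        def greet(self) -> str:
            return "hello"

        def wave(self) -> str:
            return "wave"

    greeter = Greeter()

    # Pre-3.10 style: all context managers on one line.
    with patch.object(greeter, "greet", lambda: "hi"), patch.object(greeter, "wave", lambda: "o/"):
        assert (greeter.greet(), greeter.wave()) == ("hi", "o/")

    # 3.10+ style, as used in the diff below: parenthesised context managers.
    with (
        patch.object(greeter, "greet", lambda: "hi"),
        patch.object(greeter, "wave", lambda: "o/"),
    ):
        assert (greeter.greet(), greeter.wave()) == ("hi", "o/")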
@@ -44,7 +44,7 @@ from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.storage.databases.main.events_worker import EventCacheEntry from synapse.util import Clock -from synapse.util.stringutils import random_string +from synapse.util.events import generate_fake_event_id from tests import unittest from tests.test_utils import event_injection @@ -52,10 +52,6 @@ from tests.test_utils import event_injection logger = logging.getLogger(__name__) -def generate_fake_event_id() -> str: - return "$fake_" + random_string(43) - - class FederationTestCase(unittest.FederatingHomeserverTestCase): servlets = [ admin.register_servlets, @@ -665,9 +661,12 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): ) ) - with patch.object( - fed_client, "make_membership_event", mock_make_membership_event - ), patch.object(fed_client, "send_join", mock_send_join): + with ( + patch.object( + fed_client, "make_membership_event", mock_make_membership_event + ), + patch.object(fed_client, "send_join", mock_send_join), + ): # Join and check that our join event is rejected # (The join event is rejected because it doesn't have any signatures) join_exc = self.get_failure( @@ -712,9 +711,12 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): fed_handler = self.hs.get_federation_handler() store = self.hs.get_datastores().main - with patch.object( - fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room - ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): + with ( + patch.object( + fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room + ), + patch.object(store, "is_partial_state_room", mock_is_partial_state_room), + ): # Start the partial state sync. fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) @@ -764,9 +766,12 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase): fed_handler = self.hs.get_federation_handler() store = self.hs.get_datastores().main - with patch.object( - fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room - ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): + with ( + patch.object( + fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room + ), + patch.object(store, "is_partial_state_room", mock_is_partial_state_room), + ): # Start the partial state sync. fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py
index 1b83aea579..51eca56c3b 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py
@@ -288,13 +288,15 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): } # We also expect an outbound request to /state - self.mock_federation_transport_client.get_room_state.return_value = StateRequestResponse( - # Mimic the other server not knowing about the state at all. - # We want to cause Synapse to throw an error (`Unable to get - # missing prev_event $fake_prev_event`) and fail to backfill - # the pulled event. - auth_events=[], - state=[], + self.mock_federation_transport_client.get_room_state.return_value = ( + StateRequestResponse( + # Mimic the other server not knowing about the state at all. + # We want to cause Synapse to throw an error (`Unable to get + # missing prev_event $fake_prev_event`) and fail to backfill + # the pulled event. + auth_events=[], + state=[], + ) ) pulled_event = make_event_from_dict( @@ -373,7 +375,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): In this test, we pretend we are processing a "pulled" event via backfill. The pulled event succesfully processes and the backward - extremeties are updated along with clearing out any failed pull attempts + extremities are updated along with clearing out any failed pull attempts for those old extremities. We check that we correctly cleared failed pull attempts of the @@ -805,6 +807,7 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" main_store = self.hs.get_datastores().main + state_deletion_store = self.hs.get_datastores().state_deletion # Create the room. kermit_user_id = self.register_user("kermit", "test") @@ -956,7 +959,9 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): bert_member_event.event_id: bert_member_event, rejected_kick_event.event_id: rejected_kick_event, }, - state_res_store=StateResolutionStore(main_store), + state_res_store=StateResolutionStore( + main_store, state_deletion_store + ), ) ), [bert_member_event.event_id, rejected_kick_event.event_id], @@ -1001,7 +1006,9 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): rejected_power_levels_event.event_id, ], event_map={}, - state_res_store=StateResolutionStore(main_store), + state_res_store=StateResolutionStore( + main_store, state_deletion_store + ), full_conflicted_set=set(), ) ), diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py
index 036c539db2..37acb660e7 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py
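Among other things, the changes below add `test_cached_expired_introspection`, which checks that a cached introspection result carrying `expires_in` is honoured: once the deadline passes, the token is rejected locally without a second round-trip to the introspection endpoint. A rough model of that behaviour (names and structure here are illustrative, not Synapse's internals):

    from typing import Callable, Dict, Optional, Tuple

    # token -> (introspection payload, absolute expiry time or None)
    _cache: Dict[str, Tuple[dict, Optional[float]]] = {}

    def introspect(token: str, now: float, fetch: Callable[[str], dict]) -> dict:
        """Serve introspection results from the cache, expiring entries locally."""
        cached = _cache.get(token)
        if cached is not None:
            payload, deadline = cached
            if deadline is not None and now >= deadline:
                return {"active": False}  # expired: rejected without a new request
            return payload
        payload = fetch(token)
        expires_in = payload.get("expires_in")
        _cache[token] = (payload, now + expires_in if expires_in is not None else None)
        return payload

    calls = []
    def fake_fetch(token: str) -> dict:
        calls.append(token)
        return {"active": True, "expires_in": 60}

    assert introspect("t", 0.0, fake_fetch)["active"] is True
    assert introspect("t", 60.0, fake_fetch)["active"] is False
    assert calls == ["t"]  # the endpoint was only hit once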
@@ -43,6 +43,7 @@ from synapse.api.errors import ( OAuthInsufficientScopeError, SynapseError, ) +from synapse.appservice import ApplicationService from synapse.http.site import SynapseRequest from synapse.rest import admin from synapse.rest.client import account, devices, keys, login, logout, register @@ -146,6 +147,16 @@ class MSC3861OAuthDelegation(HomeserverTestCase): return hs + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + # Provision the user and the device we use in the tests. + store = homeserver.get_datastores().main + self.get_success(store.register_user(USER_ID)) + self.get_success( + store.store_device(USER_ID, DEVICE, initial_device_display_name=None) + ) + def _assertParams(self) -> None: """Assert that the request parameters are correct.""" params = parse_qs(self.http_client.request.call_args[1]["data"].decode("utf-8")) @@ -379,6 +390,44 @@ class MSC3861OAuthDelegation(HomeserverTestCase): ) self.assertEqual(requester.device_id, DEVICE) + def test_active_user_with_device_explicit_device_id(self) -> None: + """The handler should return a requester with normal user rights and a device ID, given explicitly, as supported by MAS 0.15+""" + + self.http_client.request = AsyncMock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE]), + "device_id": DEVICE, + "username": USERNAME, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.http_client.get_json.assert_called_once_with(WELL_KNOWN) + self.http_client.request.assert_called_once_with( + method="POST", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY + ) + # It should have called with the 'X-MAS-Supports-Device-Id: 1' header + self.assertEqual( + self.http_client.request.call_args[1]["headers"].getRawHeaders( + b"X-MAS-Supports-Device-Id", + ), + [b"1"], + ) + self._assertParams() + self.assertEqual(requester.user.to_string(), "@%s:%s" % (USERNAME, SERVER_NAME)) + self.assertEqual(requester.is_guest, False) + self.assertEqual( + get_awaitable_result(self.auth.is_server_admin(requester)), False + ) + self.assertEqual(requester.device_id, DEVICE) + def test_multiple_devices(self) -> None: """The handler should raise an error if multiple devices are found in the scope.""" @@ -500,6 +549,44 @@ class MSC3861OAuthDelegation(HomeserverTestCase): error = self.get_failure(self.auth.get_user_by_req(request), SynapseError) self.assertEqual(error.value.code, 503) + def test_cached_expired_introspection(self) -> None: + """The handler should raise an error if the introspection response gives + an expiry time, the introspection response is cached and then the entry is + re-requested after it has expired.""" + + self.http_client.request = introspection_mock = AsyncMock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join( + [ + MATRIX_USER_SCOPE, + f"{MATRIX_DEVICE_SCOPE_PREFIX}AABBCC", + ] + ), + "username": USERNAME, + "expires_in": 60, + }, + ) + ) + request = Mock(args={}) + request.args[b"access_token"] = [b"mockAccessToken"] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + + # The first CS-API request causes a successful introspection + self.get_success(self.auth.get_user_by_req(request)) + self.assertEqual(introspection_mock.call_count, 1) + + # Sleep for 
60 seconds so the token expires. + self.reactor.advance(60.0) + + # Now the CS-API request fails because the token expired + self.get_failure(self.auth.get_user_by_req(request), InvalidClientTokenError) + # Ensure another introspection request was not sent + self.assertEqual(introspection_mock.call_count, 1) + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: # We only generate a master key to simplify the test. master_signing_key = generate_signing_key(device_id) @@ -550,7 +637,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): access_token="mockAccessToken", ) - self.assertEqual(channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) def expect_unauthorized( self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" @@ -560,15 +647,31 @@ class MSC3861OAuthDelegation(HomeserverTestCase): self.assertEqual(channel.code, 401, channel.json_body) def expect_unrecognized( - self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + self, + method: str, + path: str, + content: Union[bytes, str, JsonDict] = "", + auth: bool = False, ) -> None: - channel = self.make_request(method, path, content) + channel = self.make_request( + method, path, content, access_token="token" if auth else None + ) self.assertEqual(channel.code, 404, channel.json_body) self.assertEqual( channel.json_body["errcode"], Codes.UNRECOGNIZED, channel.json_body ) + def expect_forbidden( + self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + ) -> None: + channel = self.make_request(method, path, content) + + self.assertEqual(channel.code, 403, channel.json_body) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + def test_uia_endpoints(self) -> None: """Test that endpoints that were removed in MSC2964 are no longer available.""" @@ -580,36 +683,6 @@ class MSC3861OAuthDelegation(HomeserverTestCase): "POST", "/_matrix/client/v3/keys/device_signing/upload" ) - def test_3pid_endpoints(self) -> None: - """Test that 3pid account management endpoints that were removed in MSC2964 are no longer available.""" - - # Remains and requires auth: - self.expect_unauthorized("GET", "/_matrix/client/v3/account/3pid") - self.expect_unauthorized( - "POST", - "/_matrix/client/v3/account/3pid/bind", - { - "client_secret": "foo", - "id_access_token": "bar", - "id_server": "foo", - "sid": "bar", - }, - ) - self.expect_unauthorized("POST", "/_matrix/client/v3/account/3pid/unbind", {}) - - # These are gone: - self.expect_unrecognized( - "POST", "/_matrix/client/v3/account/3pid" - ) # deprecated - self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/add") - self.expect_unrecognized("POST", "/_matrix/client/v3/account/3pid/delete") - self.expect_unrecognized( - "POST", "/_matrix/client/v3/account/3pid/email/requestToken" - ) - self.expect_unrecognized( - "POST", "/_matrix/client/v3/account/3pid/msisdn/requestToken" - ) - def test_account_management_endpoints_removed(self) -> None: """Test that account management endpoints that were removed in MSC2964 are no longer available.""" self.expect_unrecognized("POST", "/_matrix/client/v3/account/deactivate") @@ -623,11 +696,35 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_registration_endpoints_removed(self) -> None: """Test that registration endpoints that were removed in MSC2964 are no longer available.""" + appservice = ApplicationService( + token="i_am_an_app_service", + id="1234", + 
namespaces={"users": [{"regex": r"@alice:.+", "exclusive": True}]}, + sender="@as_main:test", + ) + + self.hs.get_datastores().main.services_cache = [appservice] self.expect_unrecognized( "GET", "/_matrix/client/v1/register/m.login.registration_token/validity" ) + + # Registration is disabled + self.expect_forbidden( + "POST", + "/_matrix/client/v3/register", + {"username": "alice", "password": "hunter2"}, + ) + # This is still available for AS registrations - # self.expect_unrecognized("POST", "/_matrix/client/v3/register") + channel = self.make_request( + "POST", + "/_matrix/client/v3/register", + {"username": "alice", "type": "m.login.application_service"}, + shorthand=False, + access_token="i_am_an_app_service", + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.expect_unrecognized("GET", "/_matrix/client/v3/register/available") self.expect_unrecognized( "POST", "/_matrix/client/v3/register/email/requestToken" @@ -648,8 +745,25 @@ class MSC3861OAuthDelegation(HomeserverTestCase): def test_device_management_endpoints_removed(self) -> None: """Test that device management endpoints that were removed in MSC2964 are no longer available.""" - self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices") - self.expect_unrecognized("DELETE", "/_matrix/client/v3/devices/{DEVICE}") + + # Because we still support those endpoints with ASes, it checks the + # access token before returning 404 + self.http_client.request = AsyncMock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + + self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices", auth=True) + self.expect_unrecognized( + "DELETE", "/_matrix/client/v3/devices/{DEVICE}", auth=True + ) def test_openid_endpoints_removed(self) -> None: """Test that OpenID id_token endpoints that were removed in MSC2964 are no longer available.""" @@ -772,7 +886,7 @@ class MSC3861OAuthDelegation(HomeserverTestCase): req = SynapseRequest(channel, self.site) # type: ignore[arg-type] req.client.host = MAS_IPV4_ADDR req.requestHeaders.addRawHeader( - "Authorization", f"Bearer {self.auth._admin_token}" + "Authorization", f"Bearer {self.auth._admin_token()}" ) req.requestHeaders.addRawHeader("User-Agent", MAS_USER_AGENT) req.content = BytesIO(b"") diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index a81501979d..ff8e3c5cb6 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py
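The new discovery tests below assert a simple precedence rule: metadata from the OIDC discovery document forms the base, and any explicitly configured endpoint wins over the discovered one. Sketched standalone (assuming a plain-dict merge, which is not necessarily how authlib or Synapse represent provider metadata):

    def effective_metadata(discovered: dict, explicit: dict) -> dict:
        """Discovered values form the base; explicitly configured values override."""
        merged = dict(discovered)
        merged.update(explicit)
        return merged

    discovered = {
        "issuer": "https://issuer/",
        "token_endpoint": "https://issuer/token",
    }
    explicit = {"token_endpoint": "https://issuer/token-explicit"}

    meta = effective_metadata(discovered, explicit)
    assert meta["issuer"] == "https://issuer/"  # discovered value survives
    assert meta["token_endpoint"] == "https://issuer/token-explicit"  # explicit wins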
@@ -57,6 +57,7 @@ CLIENT_ID = "test-client-id"
 CLIENT_SECRET = "test-client-secret"
 BASE_URL = "https://synapse/"
 CALLBACK_URL = BASE_URL + "_synapse/client/oidc/callback"
+TEST_REDIRECT_URI = "https://test/oidc/callback"
 SCOPES = ["openid"]
 
 # config for common cases
@@ -70,12 +71,16 @@ DEFAULT_CONFIG = {
 }
 
 # extends the default config with explicit OAuth2 endpoints instead of using discovery
+#
+# We add "explicit" to things to make them different from the discovered values to make
+# sure that the explicit values override the discovered ones.
 EXPLICIT_ENDPOINT_CONFIG = {
     **DEFAULT_CONFIG,
     "discover": False,
-    "authorization_endpoint": ISSUER + "authorize",
-    "token_endpoint": ISSUER + "token",
-    "jwks_uri": ISSUER + "jwks",
+    "authorization_endpoint": ISSUER + "authorize-explicit",
+    "token_endpoint": ISSUER + "token-explicit",
+    "jwks_uri": ISSUER + "jwks-explicit",
+    "id_token_signing_alg_values_supported": ["RS256", "<explicit>"],
 }
 
@@ -259,12 +264,64 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.get_success(self.provider.load_metadata())
         self.fake_server.get_metadata_handler.assert_not_called()
 
+    @override_config({"oidc_config": {**EXPLICIT_ENDPOINT_CONFIG, "discover": True}})
+    def test_discovery_with_explicit_config(self) -> None:
+        """
+        The handler should discover the endpoints from the OIDC discovery document,
+        but the values are overridden by the explicit config.
+        """
+        # This would throw if some metadata were invalid
+        metadata = self.get_success(self.provider.load_metadata())
+        self.fake_server.get_metadata_handler.assert_called_once()
+
+        self.assertEqual(metadata.issuer, self.fake_server.issuer)
+        # It seems like authlib does not have that defined in its metadata models
+        self.assertEqual(
+            metadata.get("userinfo_endpoint"),
+            self.fake_server.userinfo_endpoint,
+        )
+
+        # Ensure the values are overridden correctly since these were configured
+        # explicitly
+        self.assertEqual(
+            metadata.authorization_endpoint,
+            EXPLICIT_ENDPOINT_CONFIG["authorization_endpoint"],
+        )
+        self.assertEqual(
+            metadata.token_endpoint, EXPLICIT_ENDPOINT_CONFIG["token_endpoint"]
+        )
+        self.assertEqual(metadata.jwks_uri, EXPLICIT_ENDPOINT_CONFIG["jwks_uri"])
+        self.assertEqual(
+            metadata.id_token_signing_alg_values_supported,
+            EXPLICIT_ENDPOINT_CONFIG["id_token_signing_alg_values_supported"],
+        )
+
+        # subsequent calls should be cached
+        self.reset_mocks()
+        self.get_success(self.provider.load_metadata())
+        self.fake_server.get_metadata_handler.assert_not_called()
+
     @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG})
     def test_no_discovery(self) -> None:
         """When discovery is disabled, it should not try to load from discovery document."""
-        self.get_success(self.provider.load_metadata())
+        metadata = self.get_success(self.provider.load_metadata())
         self.fake_server.get_metadata_handler.assert_not_called()
 
+        # Ensure the values are overridden correctly since these were configured
+        # explicitly
+        self.assertEqual(
+            metadata.authorization_endpoint,
+            EXPLICIT_ENDPOINT_CONFIG["authorization_endpoint"],
+        )
+        self.assertEqual(
+            metadata.token_endpoint, EXPLICIT_ENDPOINT_CONFIG["token_endpoint"]
+        )
+        self.assertEqual(metadata.jwks_uri, EXPLICIT_ENDPOINT_CONFIG["jwks_uri"])
+        self.assertEqual(
+            metadata.id_token_signing_alg_values_supported,
+            EXPLICIT_ENDPOINT_CONFIG["id_token_signing_alg_values_supported"],
+        )
+
     @override_config({"oidc_config": DEFAULT_CONFIG})
     def test_load_jwks(self) -> None:
         """JWKS loading is done once (then cached) if used."""
@@ -427,6 +484,32 @@ class OidcHandlerTestCase(HomeserverTestCase):
         self.assertEqual(code_verifier, "")
         self.assertEqual(redirect, "http://client/redirect")
 
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
+                "passthrough_authorization_parameters": ["additional_parameter"],
+            }
+        }
+    )
+    def test_passthrough_parameters(self) -> None:
+        """The redirect request has additional parameters, one is authorized, one is not"""
+        req = Mock(spec=["cookies", "args"])
+        req.cookies = []
+        req.args = {}
+        req.args[b"additional_parameter"] = ["a_value".encode("utf-8")]
+        req.args[b"not_authorized_parameter"] = ["any".encode("utf-8")]
+
+        url = urlparse(
+            self.get_success(
+                self.provider.handle_redirect_request(req, b"http://client/redirect")
+            )
+        )
+
+        params = parse_qs(url.query)
+        self.assertEqual(params["additional_parameter"], ["a_value"])
+        self.assertNotIn("not_authorized_parameter", params)
+
     @override_config({"oidc_config": DEFAULT_CONFIG})
     def test_redirect_request_with_code_challenge(self) -> None:
         """The redirect request has the right arguments & generates a valid session cookie."""
@@ -530,6 +613,24 @@ class OidcHandlerTestCase(HomeserverTestCase):
         code_verifier = get_value_from_macaroon(macaroon, "code_verifier")
         self.assertEqual(code_verifier, "")
 
+    @override_config(
+        {"oidc_config": {**DEFAULT_CONFIG, "redirect_uri": TEST_REDIRECT_URI}}
+    )
+    def test_redirect_request_with_overridden_redirect_uri(self) -> None:
+        """The authorization endpoint redirect has the overridden `redirect_uri` value."""
+        req = Mock(spec=["cookies"])
+        req.cookies = []
+
+        url = urlparse(
+            self.get_success(
+                self.provider.handle_redirect_request(req, b"http://client/redirect")
+            )
+        )
+
+        # Ensure that the redirect_uri in the returned url has been overridden.
+        params = parse_qs(url.query)
+        self.assertEqual(params["redirect_uri"], [TEST_REDIRECT_URI])
+
     @override_config({"oidc_config": DEFAULT_CONFIG})
     def test_callback_error(self) -> None:
         """Errors from the provider returned in the callback are displayed."""
@@ -901,6 +1002,81 @@ class OidcHandlerTestCase(HomeserverTestCase):
         {
             "oidc_config": {
                 **DEFAULT_CONFIG,
+                "redirect_uri": TEST_REDIRECT_URI,
+            }
+        }
+    )
+    def test_code_exchange_with_overridden_redirect_uri(self) -> None:
+        """Code exchange sends the overridden `redirect_uri` value."""
+        # Set up a fake IdP with a token endpoint handler.
+        token = {
+            "type": "Bearer",
+            "access_token": "aabbcc",
+        }
+
+        self.fake_server.post_token_handler.side_effect = None
+        self.fake_server.post_token_handler.return_value = FakeResponse.json(
+            payload=token
+        )
+        code = "code"
+
+        # Exchange the code against the fake IdP.
+        self.get_success(self.provider._exchange_code(code, code_verifier=""))
+
+        # Check that the `redirect_uri` parameter provided matches our
+        # overridden config value.
+        kwargs = self.fake_server.request.call_args[1]
+        args = parse_qs(kwargs["data"].decode("utf-8"))
+        self.assertEqual(args["redirect_uri"], [TEST_REDIRECT_URI])
+
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
+                "redirect_uri": TEST_REDIRECT_URI,
+            }
+        }
+    )
+    def test_code_exchange_ignores_access_token(self) -> None:
+        """
+        Code exchange completes successfully and doesn't validate the `at_hash`
+        (access token hash) field of an ID token when the access token isn't
+        going to be used.
+
+        Because we have included "openid" in the requested scopes for this IdP
+        (see `SCOPES`), user metadata is included in the ID token. Thus the
+        access token isn't needed, and it's unnecessary for Synapse to validate
+        the access token.
+
+        This is a regression test for a situation where an upstream identity
+        provider was providing an invalid `at_hash` value, which Synapse errored
+        on, yet Synapse wasn't using the access token for anything.
+        """
+        # Exchange the code against the fake IdP.
+        userinfo = {
+            "sub": "foo",
+            "username": "foo",
+            "phone": "1234567",
+        }
+        with self.fake_server.id_token_override(
+            {
+                "at_hash": "invalid-hash",
+            }
+        ):
+            request, _ = self.start_authorization(userinfo)
+            self.get_success(self.handler.handle_oidc_callback(request))
+
+        # If no error was rendered, then we have success.
+        self.render_error.assert_not_called()
+
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
                 "user_mapping_provider": {
                     "module": __name__ + ".TestMappingProviderExtra"
                 },
@@ -1271,6 +1447,113 @@ class OidcHandlerTestCase(HomeserverTestCase):
         {
             "oidc_config": {
                 **DEFAULT_CONFIG,
+                "attribute_requirements": [
+                    {"attribute": "test", "one_of": ["foo", "bar"]}
+                ],
+            }
+        }
+    )
+    def test_attribute_requirements_one_of_succeeds(self) -> None:
+        """Test that auth succeeds if a multi-valued userinfo attribute CONTAINS a required value"""
+        # userinfo with "test": ["bar"] attribute should succeed.
+        userinfo = {
+            "sub": "tester",
+            "username": "tester",
+            "test": ["bar"],
+        }
+        request, _ = self.start_authorization(userinfo)
+        self.get_success(self.handler.handle_oidc_callback(request))
+
+        # check that the auth handler got called as expected
+        self.complete_sso_login.assert_called_once_with(
+            "@tester:test",
+            self.provider.idp_id,
+            request,
+            ANY,
+            None,
+            new_user=True,
+            auth_provider_session_id=None,
+        )
+
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
+                "attribute_requirements": [
+                    {"attribute": "test", "one_of": ["foo", "bar"]}
+                ],
+            }
+        }
+    )
+    def test_attribute_requirements_one_of_fails(self) -> None:
+        """Test that auth fails if a multi-valued userinfo attribute DOES NOT
+        CONTAIN a required value
+        """
+        # userinfo with "test": ["something else"] attribute should fail.
+        userinfo = {
+            "sub": "tester",
+            "username": "tester",
+            "test": ["something else"],
+        }
+        request, _ = self.start_authorization(userinfo)
+        self.get_success(self.handler.handle_oidc_callback(request))
+        self.complete_sso_login.assert_not_called()
+
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
+                "attribute_requirements": [{"attribute": "test"}],
+            }
+        }
+    )
+    def test_attribute_requirements_does_not_exist(self) -> None:
+        """OIDC login fails if the required attribute does not exist in the OIDC userinfo response."""
+        # userinfo lacking "test" attribute should fail.
+        userinfo = {
+            "sub": "tester",
+            "username": "tester",
+        }
+        request, _ = self.start_authorization(userinfo)
+        self.get_success(self.handler.handle_oidc_callback(request))
+        self.complete_sso_login.assert_not_called()
+
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
+                "attribute_requirements": [{"attribute": "test"}],
+            }
+        }
+    )
+    def test_attribute_requirements_exist(self) -> None:
+        """OIDC login succeeds if the required attribute exists (regardless of value)
+        in the OIDC userinfo response.
+        """
+        # userinfo with "test" attribute and random value should succeed.
+        userinfo = {
+            "sub": "tester",
+            "username": "tester",
+            "test": random_string(5),  # value does not matter
+        }
+        request, _ = self.start_authorization(userinfo)
+        self.get_success(self.handler.handle_oidc_callback(request))
+
+        # check that the auth handler got called as expected
+        self.complete_sso_login.assert_called_once_with(
+            "@tester:test",
+            self.provider.idp_id,
+            request,
+            ANY,
+            None,
+            new_user=True,
+            auth_provider_session_id=None,
+        )
+
+    @override_config(
+        {
+            "oidc_config": {
+                **DEFAULT_CONFIG,
                 "attribute_requirements": [{"attribute": "test", "value": "foobar"}],
             }
         }
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index ed203eb299..d0351a8509 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py
@@ -768,17 +768,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # Check that the callback has been called. m.assert_called_once() - # Set some email configuration so the test doesn't fail because of its absence. - @override_config({"email": {"notif_from": "noreply@test"}}) - def test_3pid_allowed(self) -> None: - """Tests that an is_3pid_allowed_callbacks forbidding a 3PID makes Synapse refuse - to bind the new 3PID, and that one allowing a 3PID makes Synapse accept to bind - the 3PID. Also checks that the module is passed a boolean indicating whether the - user to bind this 3PID to is currently registering. - """ - self._test_3pid_allowed("rin", False) - self._test_3pid_allowed("kitay", True) - def test_displayname(self) -> None: """Tests that the get_displayname_for_registration callback can define the display name of a user when registering. @@ -829,66 +818,6 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): # Check that the callback has been called. m.assert_called_once() - def _test_3pid_allowed(self, username: str, registration: bool) -> None: - """Tests that the "is_3pid_allowed" module callback is called correctly, using - either /register or /account URLs depending on the arguments. - - Args: - username: The username to use for the test. - registration: Whether to test with registration URLs. - """ - self.hs.get_identity_handler().send_threepid_validation = AsyncMock( # type: ignore[method-assign] - return_value=0 - ) - - m = AsyncMock(return_value=False) - self.hs.get_password_auth_provider().is_3pid_allowed_callbacks = [m] - - self.register_user(username, "password") - tok = self.login(username, "password") - - if registration: - url = "/register/email/requestToken" - else: - url = "/account/3pid/email/requestToken" - - channel = self.make_request( - "POST", - url, - { - "client_secret": "foo", - "email": "foo@test.com", - "send_attempt": 0, - }, - access_token=tok, - ) - self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) - self.assertEqual( - channel.json_body["errcode"], - Codes.THREEPID_DENIED, - channel.json_body, - ) - - m.assert_called_once_with("email", "foo@test.com", registration) - - m = AsyncMock(return_value=True) - self.hs.get_password_auth_provider().is_3pid_allowed_callbacks = [m] - - channel = self.make_request( - "POST", - url, - { - "client_secret": "foo", - "email": "bar@test.com", - "send_attempt": 0, - }, - access_token=tok, - ) - self.assertEqual(channel.code, HTTPStatus.OK, channel.result) - self.assertIn("sid", channel.json_body) - - m.assert_called_once_with("email", "bar@test.com", registration) - def _setup_get_name_for_registration(self, callback_name: str) -> Mock: """Registers either a get_username_for_registration callback or a get_displayname_for_registration callback that appends "-foo" to the username the diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index cc630d606c..6b7bf112c2 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py
@@ -23,14 +23,21 @@ from typing import Optional, cast from unittest.mock import Mock, call from parameterized import parameterized -from signedjson.key import generate_signing_key +from signedjson.key import ( + encode_verify_key_base64, + generate_signing_key, + get_verify_key, +) from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.api.presence import UserDevicePresenceState, UserPresenceState -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.events.builder import EventBuilder +from synapse.api.room_versions import ( + RoomVersion, +) +from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.events import EventBase, make_event_from_dict from synapse.federation.sender import FederationSender from synapse.handlers.presence import ( BUSY_ONLINE_TIMEOUT, @@ -45,18 +52,24 @@ from synapse.handlers.presence import ( handle_update, ) from synapse.rest import admin -from synapse.rest.client import room +from synapse.rest.client import login, room, sync from synapse.server import HomeServer from synapse.storage.database import LoggingDatabaseConnection +from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util import Clock from tests import unittest from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.unittest import override_config class PresenceUpdateTestCase(unittest.HomeserverTestCase): - servlets = [admin.register_servlets] + servlets = [ + admin.register_servlets, + login.register_servlets, + sync.register_servlets, + ] def prepare( self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer @@ -425,6 +438,102 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase): wheel_timer.insert.assert_not_called() + + # `rc_presence` is set very high during unit tests to avoid ratelimiting + # subtly impacting unrelated tests. We set the ratelimiting back to a + # reasonable value for the tests specific to presence ratelimiting. + @override_config( + {"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}} + ) + def test_over_ratelimit_offline_to_online_to_unavailable(self) -> None: + """ + Send a presence update, check that it went through, immediately send another one and + check that it was ignored. + """ + self._test_ratelimit_offline_to_online_to_unavailable(ratelimited=True) + + @override_config( + {"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}} + ) + def test_within_ratelimit_offline_to_online_to_unavailable(self) -> None: + """ + Send a presence update, check that it went through, advance time a sufficient amount, + send another presence update and check that it also worked. + """ + self._test_ratelimit_offline_to_online_to_unavailable(ratelimited=False) + + @override_config( + {"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}} + ) + def _test_ratelimit_offline_to_online_to_unavailable( + self, ratelimited: bool + ) -> None: + """Test rate limit for presence updates sent with sync requests. + + Args: + ratelimited: Whether to test the rate-limited case. + """ + wheel_timer = Mock() + user_id = "@user:pass" + now = 5000000 + sync_url = "/sync?access_token=%s&set_presence=%s" + + # Register the user who syncs presence + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Get the handler (which kicks off a bunch of timers).
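+ # The `handle_update` call below is driven directly, with a mocked wheel timer and `persist=False`, to compute an initial offline state; its result is not used further in this test.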
+ presence_handler = self.hs.get_presence_handler() + + # Ensure the user is initially offline. + prev_state = UserPresenceState.default(user_id) + new_state = prev_state.copy_and_replace( + state=PresenceState.OFFLINE, last_active_ts=now + ) + + state, persist_and_notify, federation_ping = handle_update( + prev_state, + new_state, + is_mine=True, + wheel_timer=wheel_timer, + now=now, + persist=False, + ) + + # Check that the user is offline. + state = self.get_success( + presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, PresenceState.OFFLINE) + + # Send sync request with set_presence=online. + channel = self.make_request("GET", sync_url % (access_token, "online")) + self.assertEqual(200, channel.code) + + # Assert the user is now online. + state = self.get_success( + presence_handler.get_state(UserID.from_string(user_id)) + ) + self.assertEqual(state.state, PresenceState.ONLINE) + + if not ratelimited: + # Advance time a sufficient amount to avoid rate limiting. + self.reactor.advance(30) + + # Send another sync request with set_presence=unavailable. + channel = self.make_request("GET", sync_url % (access_token, "unavailable")) + self.assertEqual(200, channel.code) + + state = self.get_success( + presence_handler.get_state(UserID.from_string(user_id)) + ) + + if ratelimited: + # Assert the user is still online and presence update was ignored. + self.assertEqual(state.state, PresenceState.ONLINE) + else: + # Assert the user is now unavailable. + self.assertEqual(state.state, PresenceState.UNAVAILABLE) + class PresenceTimeoutTestCase(unittest.TestCase): """Tests different timers and that the timer does not change `status_msg` of user.""" @@ -1107,7 +1216,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): ), ] ], - name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}", + name_func=lambda testcase_func, + param_num, + params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[5] else 'monolith'}", ) @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) def test_set_presence_from_syncing_multi_device( @@ -1343,7 +1454,9 @@ class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): ), ] ], - name_func=lambda testcase_func, param_num, params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}", + name_func=lambda testcase_func, + param_num, + params: f"{testcase_func.__name__}_{param_num}_{'workers' if params.args[4] else 'monolith'}", ) @unittest.override_config({"experimental_features": {"msc3026_enabled": True}}) def test_set_presence_from_non_syncing_multi_device( @@ -1821,6 +1934,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): # self.event_builder_for_2.hostname = "test2" self.store = hs.get_datastores().main + self.storage_controllers = hs.get_storage_controllers() self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() @@ -1936,29 +2050,35 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): hostname = get_domain_from_id(user_id) - room_version = self.get_success(self.store.get_room_version_id(room_id)) + room_version = self.get_success(self.store.get_room_version(room_id)) - builder = EventBuilder( - state=self.state, - event_auth_handler=self._event_auth_handler, - store=self.store, - clock=self.clock, - hostname=hostname, - signing_key=self.random_signing_key, - room_version=KNOWN_ROOM_VERSIONS[room_version], - 
room_id=room_id, - type=EventTypes.Member, - sender=user_id, - state_key=user_id, - content={"membership": Membership.JOIN}, + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) ) - prev_event_ids = self.get_success( - self.store.get_latest_event_ids_in_room(room_id) + # Figure out what the forward extremities in the room are (the most recent + # events that aren't tied into the DAG) + forward_extremity_event_ids = self.get_success( + self.hs.get_datastores().main.get_latest_event_ids_in_room(room_id) ) - event = self.get_success( - builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None) + event = self.create_fake_event_from_remote_server( + remote_server_name=hostname, + event_dict={ + "room_id": room_id, + "sender": user_id, + "type": EventTypes.Member, + "state_key": user_id, + "depth": 1000, + "origin_server_ts": 1, + "content": {"membership": Membership.JOIN}, + "auth_events": [ + state_map[(EventTypes.Create, "")].event_id, + state_map[(EventTypes.JoinRules, "")].event_id, + ], + "prev_events": list(forward_extremity_event_ids), + }, + room_version=room_version, ) self.get_success(self.federation_event_handler.on_receive_pdu(hostname, event)) @@ -1966,3 +2086,50 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): # Check that it was successfully persisted. self.get_success(self.store.get_event(event.event_id)) self.get_success(self.store.get_event(event.event_id)) + + def create_fake_event_from_remote_server( + self, remote_server_name: str, event_dict: JsonDict, room_version: RoomVersion + ) -> EventBase: + """ + This is similar to what `FederatingHomeserverTestCase` is doing but we don't + need all of the extra baggage and we want to be able to create an event from + many remote servers. + """ + + # poke the other server's signing key into the key store, so that we don't + # make requests for it + other_server_signature_key = generate_signing_key("test") + verify_key = get_verify_key(other_server_signature_key) + verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version) + + self.get_success( + self.hs.get_datastores().main.store_server_keys_response( + remote_server_name, + from_server=remote_server_name, + ts_added_ms=self.clock.time_msec(), + verify_keys={ + verify_key_id: FetchKeyResult( + verify_key=verify_key, + valid_until_ts=self.clock.time_msec() + 10000, + ), + }, + response_json={ + "verify_keys": { + verify_key_id: {"key": encode_verify_key_base64(verify_key)} + } + }, + ) + ) + + add_hashes_and_signatures( + room_version=room_version, + event_dict=event_dict, + signature_name=remote_server_name, + signing_key=other_server_signature_key, + ) + event = make_event_from_dict( + event_dict, + room_version=room_version, + ) + + return event diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index cb1c6fbb80..2b9b56da95 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py
@@ -369,6 +369,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): time_now_ms=self.clock.time_msec(), upload_name=None, filesystem_id="xyz", + sha256="abcdefg12345", ) ) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 92487692db..99bd0de834 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py
@@ -588,6 +588,29 @@ class RegistrationTestCase(unittest.HomeserverTestCase): d = self.store.is_support_user(user_id) self.assertFalse(self.get_success(d)) + def test_underscore_localpart_rejected_by_default(self) -> None: + for invalid_user_id in ("_", "_prefixed"): + with self.subTest(invalid_user_id=invalid_user_id): + self.get_failure( + self.handler.register_user(localpart=invalid_user_id), + SynapseError, + ) + + @override_config( + { + "allow_underscore_prefixed_localpart": True, + } + ) + def test_underscore_localpart_allowed_if_configured(self) -> None: + for valid_user_id in ("_", "_prefixed"): + with self.subTest(valid_user_id=valid_user_id): + user_id = self.get_success( + self.handler.register_user( + localpart=valid_user_id, + ), + ) + self.assertEqual(user_id, f"@{valid_user_id}:test") + def test_invalid_user_id(self) -> None: invalid_user_id = "^abcd" self.get_failure( @@ -715,6 +738,41 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self.handler.register_user(localpart="bobflimflob", auth_provider_id="saml") ) + def test_register_default_user_type(self) -> None: + """Test that the default user type is none when registering a user.""" + user_id = self.get_success(self.handler.register_user(localpart="user")) + user_info = self.get_success(self.store.get_user_by_id(user_id)) + assert user_info is not None + self.assertEqual(user_info.user_type, None) + + def test_register_extra_user_types_valid(self) -> None: + """ + Test that the specified user type is set correctly when registering a user. + n.b. No validation is done on the user type, so this test + is only to ensure that the user type can be set to any value. + """ + user_id = self.get_success( + self.handler.register_user(localpart="user", user_type="anyvalue") + ) + user_info = self.get_success(self.store.get_user_by_id(user_id)) + assert user_info is not None + self.assertEqual(user_info.user_type, "anyvalue") + + @override_config( + { + "user_types": { + "extra_user_types": ["extra1", "extra2"], + "default_user_type": "extra1", + } + } + ) + def test_register_extra_user_types_with_default(self) -> None: + """Test that the default_user_type in config is set correctly when registering a user.""" + user_id = self.get_success(self.handler.register_user(localpart="user")) + user_info = self.get_success(self.store.get_user_by_id(user_id)) + assert user_info is not None + self.assertEqual(user_info.user_type, "extra1") + async def get_or_create_user( self, requester: Requester, diff --git a/tests/handlers/test_room_list.py b/tests/handlers/test_room_list.py
index 4d22ef98c2..45cef09b22 100644 --- a/tests/handlers/test_room_list.py +++ b/tests/handlers/test_room_list.py
@@ -6,6 +6,7 @@ from synapse.rest.client import directory, login, room from synapse.types import JsonDict from tests import unittest +from tests.utils import default_config class RoomListHandlerTestCase(unittest.HomeserverTestCase): @@ -30,6 +31,11 @@ class RoomListHandlerTestCase(unittest.HomeserverTestCase): assert channel.code == HTTPStatus.OK, f"couldn't publish room: {channel.result}" return room_id + def default_config(self) -> JsonDict: + config = default_config("test") + config["room_list_publication_rules"] = [{"action": "allow"}] + return config + def test_acls_applied_to_room_directory_results(self) -> None: """ Creates 3 rooms. Room 2 has an ACL that only permits the homeservers diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 213a66ed1a..d87fe9d62c 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py
@@ -5,10 +5,13 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin import synapse.rest.client.login import synapse.rest.client.room -from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import LimitExceededError, SynapseError +from synapse.api.constants import AccountDataTypes, EventTypes, Membership +from synapse.api.errors import Codes, LimitExceededError, SynapseError from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import FrozenEventV3 +from synapse.federation.federation_base import ( + event_from_pdu_json, +) from synapse.federation.federation_client import SendJoinResult from synapse.server import HomeServer from synapse.types import UserID, create_requester @@ -172,20 +175,25 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): ) ) - with patch.object( - self.handler.federation_handler.federation_client, - "make_membership_event", - mock_make_membership_event, - ), patch.object( - self.handler.federation_handler.federation_client, - "send_join", - mock_send_join, - ), patch( - "synapse.event_auth._is_membership_change_allowed", - return_value=None, - ), patch( - "synapse.handlers.federation_event.check_state_dependent_auth_rules", - return_value=None, + with ( + patch.object( + self.handler.federation_handler.federation_client, + "make_membership_event", + mock_make_membership_event, + ), + patch.object( + self.handler.federation_handler.federation_client, + "send_join", + mock_send_join, + ), + patch( + "synapse.event_auth._is_membership_change_allowed", + return_value=None, + ), + patch( + "synapse.handlers.federation_event.check_state_dependent_auth_rules", + return_value=None, + ), ): self.get_success( self.handler.update_membership( @@ -380,9 +388,29 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase): ) def test_forget_when_not_left(self) -> None: - """Tests that a user cannot not forgets a room that has not left.""" + """Tests that a user cannot forget a room that they are still in.""" self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError) + def test_nonlocal_room_user_action(self) -> None: + """ + Test that non-local user ids cannot perform room actions through + this homeserver. + """ + alien_user_id = UserID.from_string("@cheeky_monkey:matrix.org") + bad_room_id = f"{self.room_id}+BAD_ID" + + exc = self.get_failure( + self.handler.update_membership( + create_requester(self.alice), + alien_user_id, + bad_room_id, + "unban", + ), + SynapseError, + ).value + + self.assertEqual(exc.errcode, Codes.BAD_JSON) + def test_rejoin_forgotten_by_user(self) -> None: """Test that a user that has forgotten a room can do a re-join. The room was not forgotten from the local server. @@ -428,3 +456,165 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase): new_count = rows[0][0] self.assertEqual(initial_count, new_count) + + +class TestInviteFiltering(FederatingHomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + self.fed_handler = hs.get_federation_handler() + self.store = hs.get_datastores().main + + # Create two users.
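+ # Alice sends the invites; Bob is the target whose MSC4155 invite-permission account data drives the filtering behaviour under test.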
+ self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + + @override_config({"experimental_features": {"msc4155_enabled": True}}) + def test_msc4155_block_invite_local(self) -> None: + """Test that MSC4155 will block a user from being invited to a room""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG, + { + "blocked_users": [self.alice], + }, + ) + ) + + f = self.get_failure( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") + + @override_config({"experimental_features": {"msc4155_enabled": False}}) + def test_msc4155_disabled_allow_invite_local(self) -> None: + """Test that a user can still be invited to a room when MSC4155 is disabled""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG, + { + "blocked_users": [self.alice], + }, + ) + ) + + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ), + ) + + @override_config({"experimental_features": {"msc4155_enabled": True}}) + def test_msc4155_block_invite_remote(self) -> None: + """Test that MSC4155 will block a remote user from being invited to a room""" + # A remote user who sends the invite + remote_server = "otherserver" + remote_user = "@otheruser:" + remote_server + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG, + {"blocked_users": [remote_user]}, + ) + ) + + room_id = self.helper.create_room_as( + room_creator=self.alice, tok=self.alice_token + ) + room_version = self.get_success(self.store.get_room_version(room_id)) + + invite_event = event_from_pdu_json( + { + "type": EventTypes.Member, + "content": {"membership": "invite"}, + "room_id": room_id, + "sender": remote_user, + "state_key": self.bob, + "depth": 32, + "prev_events": [], + "auth_events": [], + "origin_server_ts": self.clock.time_msec(), + }, + room_version, + ) + + f = self.get_failure( + self.fed_handler.on_invite_request( + remote_server, + invite_event, + invite_event.room_version, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") + + @override_config({"experimental_features": {"msc4155_enabled": True}}) + def test_msc4155_block_invite_remote_server(self) -> None: + """Test that MSC4155 will block a remote server's user from being invited to a room""" + # A remote user who sends the invite + remote_server = "otherserver" + remote_user = "@otheruser:" + remote_server + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG, + {"blocked_servers": [remote_server]}, + ) + ) + + room_id = self.helper.create_room_as( + room_creator=self.alice, tok=self.alice_token + ) + room_version = self.get_success(self.store.get_room_version(room_id)) + + invite_event = 
event_from_pdu_json( + { + "type": EventTypes.Member, + "content": {"membership": "invite"}, + "room_id": room_id, + "sender": remote_user, + "state_key": self.bob, + "depth": 32, + "prev_events": [], + "auth_events": [], + "origin_server_ts": self.clock.time_msec(), + }, + room_version, + ) + + f = self.get_failure( + self.fed_handler.on_invite_request( + remote_server, + invite_event, + invite_event.room_version, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") diff --git a/tests/handlers/test_room_policy.py b/tests/handlers/test_room_policy.py new file mode 100644
index 0000000000..26642c18ea --- /dev/null +++ b/tests/handlers/test_room_policy.py
@@ -0,0 +1,226 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# +from typing import Optional +from unittest import mock + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.events import EventBase, make_event_from_dict +from synapse.rest import admin +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.types import JsonDict, UserID +from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM +from synapse.util import Clock + +from tests import unittest +from tests.test_utils import event_injection + + +class RoomPolicyTestCase(unittest.FederatingHomeserverTestCase): + """Tests room policy handler.""" + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + # mock out the federation transport client + self.mock_federation_transport_client = mock.Mock( + spec=["get_policy_recommendation_for_pdu"] + ) + self.mock_federation_transport_client.get_policy_recommendation_for_pdu = ( + mock.AsyncMock() + ) + return super().setup_test_homeserver( + federation_transport_client=self.mock_federation_transport_client + ) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.hs = hs + self.handler = hs.get_room_policy_handler() + main_store = self.hs.get_datastores().main + + # Create a room + self.creator = self.register_user("creator", "test1234") + self.creator_token = self.login("creator", "test1234") + self.room_id = self.helper.create_room_as( + room_creator=self.creator, tok=self.creator_token + ) + room_version = self.get_success(main_store.get_room_version(self.room_id)) + + # Create some sample events + self.spammy_event = make_event_from_dict( + room_version=room_version, + internal_metadata_dict={}, + event_dict={ + "room_id": self.room_id, + "type": "m.room.message", + "sender": "@spammy:example.org", + "content": { + "msgtype": "m.text", + "body": "This is a spammy event.", + }, + }, + ) + self.not_spammy_event = make_event_from_dict( + room_version=room_version, + internal_metadata_dict={}, + event_dict={ + "room_id": self.room_id, + "type": "m.room.message", + "sender": "@not_spammy:example.org", + "content": { + "msgtype": "m.text", + "body": "This is a NOT spammy event.", + }, + }, + ) + + # Prepare the policy server mock to decide spam vs not spam on those events + self.call_count = 0 + + async def get_policy_recommendation_for_pdu( + destination: str, + pdu: EventBase, + timeout: Optional[int] = None, + ) -> JsonDict: + self.call_count += 1 + self.assertEqual(destination, self.OTHER_SERVER_NAME) + if pdu.event_id == self.spammy_event.event_id: + return {"recommendation": RECOMMENDATION_SPAM} + elif pdu.event_id == self.not_spammy_event.event_id: + return {"recommendation": RECOMMENDATION_OK} + else: + self.fail("Unexpected event ID") + + self.mock_federation_transport_client.get_policy_recommendation_for_pdu.side_effect = get_policy_recommendation_for_pdu + + def 
_add_policy_server_to_room(self) -> None: + # Inject a member event into the room + policy_user_id = f"@policy:{self.OTHER_SERVER_NAME}" + self.get_success( + event_injection.inject_member_event( + self.hs, self.room_id, policy_user_id, "join" + ) + ) + self.helper.send_state( + self.room_id, + "org.matrix.msc4284.policy", + { + "via": self.OTHER_SERVER_NAME, + }, + tok=self.creator_token, + state_key="", + ) + + def test_no_policy_event_set(self) -> None: + # We don't need to modify the room state at all - we're testing the default + # case where a room doesn't use a policy server. + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 0) + + def test_empty_policy_event_set(self) -> None: + self.helper.send_state( + self.room_id, + "org.matrix.msc4284.policy", + { + # empty content (no `via`) + }, + tok=self.creator_token, + state_key="", + ) + + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 0) + + def test_nonstring_policy_event_set(self) -> None: + self.helper.send_state( + self.room_id, + "org.matrix.msc4284.policy", + { + "via": 42, # should be a server name + }, + tok=self.creator_token, + state_key="", + ) + + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 0) + + def test_self_policy_event_set(self) -> None: + self.helper.send_state( + self.room_id, + "org.matrix.msc4284.policy", + { + # We ignore events when the policy server is ourselves (for now?) + "via": (UserID.from_string(self.creator)).domain, + }, + tok=self.creator_token, + state_key="", + ) + + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 0) + + def test_invalid_server_policy_event_set(self) -> None: + self.helper.send_state( + self.room_id, + "org.matrix.msc4284.policy", + { + "via": "|this| is *not* a (valid) server name.com", + }, + tok=self.creator_token, + state_key="", + ) + + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 0) + + def test_not_in_room_policy_event_set(self) -> None: + self.helper.send_state( + self.room_id, + "org.matrix.msc4284.policy", + { + "via": f"x.{self.OTHER_SERVER_NAME}", + }, + tok=self.creator_token, + state_key="", + ) + + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 0) + + def test_spammy_event_is_spam(self) -> None: + self._add_policy_server_to_room() + + ok = self.get_success(self.handler.is_event_allowed(self.spammy_event)) + self.assertEqual(ok, False) + self.assertEqual(self.call_count, 1) + + def test_not_spammy_event_is_not_spam(self) -> None: + self._add_policy_server_to_room() + + ok = self.get_success(self.handler.is_event_allowed(self.not_spammy_event)) + self.assertEqual(ok, True) + self.assertEqual(self.call_count, 1) diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index 244a4e7689..b55fa1a8fd 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py
@@ -757,6 +757,54 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase): ) self._assert_hierarchy(result, expected) + def test_fed_root(self) -> None: + """ + Test if requested room is available over federation. + """ + fed_hostname = self.hs.hostname + "2" + fed_space = "#fed_space:" + fed_hostname + fed_subroom = "#fed_sub_room:" + fed_hostname + + requested_room_entry = _RoomEntry( + fed_space, + { + "room_id": fed_space, + "world_readable": True, + "room_type": RoomTypes.SPACE, + }, + [ + { + "type": EventTypes.SpaceChild, + "room_id": fed_space, + "state_key": fed_subroom, + "content": {"via": [fed_hostname]}, + } + ], + ) + child_room = { + "room_id": fed_subroom, + "world_readable": True, + } + + async def summarize_remote_room_hierarchy( + _self: Any, room: Any, suggested_only: bool + ) -> Tuple[Optional[_RoomEntry], Dict[str, JsonDict], Set[str]]: + return requested_room_entry, {fed_subroom: child_room}, set() + + expected = [ + (fed_space, [fed_subroom]), + (fed_subroom, ()), + ] + + with mock.patch( + "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy", + new=summarize_remote_room_hierarchy, + ): + result = self.get_success( + self.handler.get_room_hierarchy(create_requester(self.user), fed_space) + ) + self._assert_hierarchy(result, expected) + def test_fed_filtering(self) -> None: """ Rooms returned over federation should be properly filtered to only include diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py deleted file mode 100644
index 6ab8fda6e7..0000000000 --- a/tests/handlers/test_saml.py +++ /dev/null
@@ -1,381 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2020 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import Any, Dict, Optional, Set, Tuple -from unittest.mock import AsyncMock, Mock - -import attr - -from twisted.test.proto_helpers import MemoryReactor - -from synapse.api.errors import RedirectException -from synapse.module_api import ModuleApi -from synapse.server import HomeServer -from synapse.types import JsonDict -from synapse.util import Clock - -from tests.unittest import HomeserverTestCase, override_config - -# Check if we have the dependencies to run the tests. -try: - import saml2.config - import saml2.response - from saml2.sigver import SigverError - - has_saml2 = True - - # pysaml2 can be installed and imported, but might not be able to find xmlsec1. - config = saml2.config.SPConfig() - try: - config.load({"metadata": {}}) - has_xmlsec1 = True - except SigverError: - has_xmlsec1 = False -except ImportError: - has_saml2 = False - has_xmlsec1 = False - -# These are a few constants that are used as config parameters in the tests. -BASE_URL = "https://synapse/" - - -@attr.s -class FakeAuthnResponse: - ava = attr.ib(type=dict) - assertions = attr.ib(type=list, factory=list) - in_response_to = attr.ib(type=Optional[str], default=None) - - -class TestMappingProvider: - def __init__(self, config: None, module: ModuleApi): - pass - - @staticmethod - def parse_config(config: JsonDict) -> None: - return None - - @staticmethod - def get_saml_attributes(config: None) -> Tuple[Set[str], Set[str]]: - return {"uid"}, {"displayName"} - - def get_remote_user_id( - self, saml_response: "saml2.response.AuthnResponse", client_redirect_url: str - ) -> str: - return saml_response.ava["uid"] - - def saml_response_to_user_attributes( - self, - saml_response: "saml2.response.AuthnResponse", - failures: int, - client_redirect_url: str, - ) -> dict: - localpart = saml_response.ava["username"] + (str(failures) if failures else "") - return {"mxid_localpart": localpart, "displayname": None} - - -class TestRedirectMappingProvider(TestMappingProvider): - def saml_response_to_user_attributes( - self, - saml_response: "saml2.response.AuthnResponse", - failures: int, - client_redirect_url: str, - ) -> dict: - raise RedirectException(b"https://custom-saml-redirect/") - - -class SamlHandlerTestCase(HomeserverTestCase): - def default_config(self) -> Dict[str, Any]: - config = super().default_config() - config["public_baseurl"] = BASE_URL - saml_config: Dict[str, Any] = { - "sp_config": {"metadata": {}}, - # Disable grandfathering. - "grandfathered_mxid_source_attribute": None, - "user_mapping_provider": {"module": __name__ + ".TestMappingProvider"}, - } - - # Update this config with what's in the default config so that - # override_config works as expected. 
- saml_config.update(config.get("saml2_config", {})) - config["saml2_config"] = saml_config - - return config - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - hs = self.setup_test_homeserver() - - self.handler = hs.get_saml_handler() - - # Reduce the number of attempts when generating MXIDs. - sso_handler = hs.get_sso_handler() - sso_handler._MAP_USERNAME_RETRIES = 3 - - return hs - - if not has_saml2: - skip = "Requires pysaml2" - elif not has_xmlsec1: - skip = "Requires xmlsec1" - - def test_map_saml_response_to_user(self) -> None: - """Ensure that mapping the SAML response returned from a provider to an MXID works properly.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - # send a mocked-up SAML response to the callback - saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) - request = _mock_request() - self.get_success( - self.handler._handle_authn_response(request, saml_response, "redirect_uri") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "saml", - request, - "redirect_uri", - None, - new_user=True, - auth_provider_session_id=None, - ) - - @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}}) - def test_map_saml_response_to_existing_user(self) -> None: - """Existing users can log in with SAML account.""" - store = self.hs.get_datastores().main - self.get_success( - store.register_user(user_id="@test_user:test", password_hash=None) - ) - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - # Map a user via SSO. - saml_response = FakeAuthnResponse( - {"uid": "tester", "mxid": ["test_user"], "username": "test_user"} - ) - request = _mock_request() - self.get_success( - self.handler._handle_authn_response(request, saml_response, "") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "saml", - request, - "", - None, - new_user=False, - auth_provider_session_id=None, - ) - - # Subsequent calls should map to the same mxid. 
- auth_handler.complete_sso_login.reset_mock() - self.get_success( - self.handler._handle_authn_response(request, saml_response, "") - ) - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "saml", - request, - "", - None, - new_user=False, - auth_provider_session_id=None, - ) - - def test_map_saml_response_to_invalid_localpart(self) -> None: - """If the mapping provider generates an invalid localpart it should be rejected.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - # mock out the error renderer too - sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) # type: ignore[method-assign] - - saml_response = FakeAuthnResponse({"uid": "test", "username": "föö"}) - request = _mock_request() - self.get_success( - self.handler._handle_authn_response(request, saml_response, ""), - ) - sso_handler.render_error.assert_called_once_with( - request, "mapping_error", "localpart is invalid: föö" - ) - auth_handler.complete_sso_login.assert_not_called() - - def test_map_saml_response_to_user_retries(self) -> None: - """The mapping provider can retry generating an MXID if the MXID is already in use.""" - - # stub out the auth handler and error renderer - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) # type: ignore[method-assign] - - # register a user to occupy the first-choice MXID - store = self.hs.get_datastores().main - self.get_success( - store.register_user(user_id="@test_user:test", password_hash=None) - ) - - # send the fake SAML response - saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) - request = _mock_request() - self.get_success( - self.handler._handle_authn_response(request, saml_response, ""), - ) - - # test_user is already taken, so test_user1 gets registered instead. - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user1:test", - "saml", - request, - "", - None, - new_user=True, - auth_provider_session_id=None, - ) - auth_handler.complete_sso_login.reset_mock() - - # Register all of the potential mxids for a particular SAML username. - self.get_success( - store.register_user(user_id="@tester:test", password_hash=None) - ) - for i in range(1, 3): - self.get_success( - store.register_user(user_id="@tester%d:test" % i, password_hash=None) - ) - - # Now attempt to map to a username, this will fail since all potential usernames are taken. 
- saml_response = FakeAuthnResponse({"uid": "tester", "username": "tester"}) - self.get_success( - self.handler._handle_authn_response(request, saml_response, ""), - ) - sso_handler.render_error.assert_called_once_with( - request, - "mapping_error", - "Unable to generate a Matrix ID from the SSO response", - ) - auth_handler.complete_sso_login.assert_not_called() - - @override_config( - { - "saml2_config": { - "user_mapping_provider": { - "module": __name__ + ".TestRedirectMappingProvider" - }, - } - } - ) - def test_map_saml_response_redirect(self) -> None: - """Test a mapping provider that raises a RedirectException""" - - saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) - request = _mock_request() - e = self.get_failure( - self.handler._handle_authn_response(request, saml_response, ""), - RedirectException, - ) - self.assertEqual(e.value.location, b"https://custom-saml-redirect/") - - @override_config( - { - "saml2_config": { - "attribute_requirements": [ - {"attribute": "userGroup", "value": "staff"}, - {"attribute": "department", "value": "sales"}, - ], - }, - } - ) - def test_attribute_requirements(self) -> None: - """The required attributes must be met from the SAML response.""" - - # stub out the auth handler - auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = AsyncMock() # type: ignore[method-assign] - - # The response doesn't have the proper userGroup or department. - saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) - request = _mock_request() - self.get_success( - self.handler._handle_authn_response(request, saml_response, "redirect_uri") - ) - auth_handler.complete_sso_login.assert_not_called() - - # The response doesn't have the proper department. - saml_response = FakeAuthnResponse( - {"uid": "test_user", "username": "test_user", "userGroup": ["staff"]} - ) - request = _mock_request() - self.get_success( - self.handler._handle_authn_response(request, saml_response, "redirect_uri") - ) - auth_handler.complete_sso_login.assert_not_called() - - # Add the proper attributes and it should succeed. - saml_response = FakeAuthnResponse( - { - "uid": "test_user", - "username": "test_user", - "userGroup": ["staff", "admin"], - "department": ["sales"], - } - ) - request.reset_mock() - self.get_success( - self.handler._handle_authn_response(request, saml_response, "redirect_uri") - ) - - # check that the auth handler got called as expected - auth_handler.complete_sso_login.assert_called_once_with( - "@test_user:test", - "saml", - request, - "redirect_uri", - None, - new_user=True, - auth_provider_session_id=None, - ) - - -def _mock_request() -> Mock: - """Returns a mock which will stand in as a SynapseRequest""" - mock = Mock( - spec=[ - "finish", - "getClientAddress", - "getHeader", - "setHeader", - "setResponseCode", - "write", - ] - ) - # `_disconnected` musn't be another `Mock`, otherwise it will be truthy. - mock._disconnected = False - return mock diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py deleted file mode 100644
index cedcea27d9..0000000000 --- a/tests/handlers/test_send_email.py +++ /dev/null
@@ -1,230 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2021 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - - -from typing import Callable, List, Tuple, Type, Union -from unittest.mock import patch - -from zope.interface import implementer - -from twisted.internet import defer -from twisted.internet._sslverify import ClientTLSOptions -from twisted.internet.address import IPv4Address, IPv6Address -from twisted.internet.defer import ensureDeferred -from twisted.internet.interfaces import IProtocolFactory -from twisted.internet.ssl import ContextFactory -from twisted.mail import interfaces, smtp - -from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase, override_config - - -def TestingESMTPTLSClientFactory( - contextFactory: ContextFactory, - _connectWrapped: bool, - wrappedProtocol: IProtocolFactory, -) -> IProtocolFactory: - """We use this to pass through in testing without using TLS, but - saving the context information to check that it would have happened. - - Note that this is what the MemoryReactor does on connectSSL. - It only saves the contextFactory, but starts the connection with the - underlying Factory. - See: L{twisted.internet.testing.MemoryReactor.connectSSL}""" - - wrappedProtocol._testingContextFactory = contextFactory # type: ignore[attr-defined] - return wrappedProtocol - - -@implementer(interfaces.IMessageDelivery) -class _DummyMessageDelivery: - def __init__(self) -> None: - # (recipient, message) tuples - self.messages: List[Tuple[smtp.Address, bytes]] = [] - - def receivedHeader( - self, - helo: Tuple[bytes, bytes], - origin: smtp.Address, - recipients: List[smtp.User], - ) -> None: - return None - - def validateFrom( - self, helo: Tuple[bytes, bytes], origin: smtp.Address - ) -> smtp.Address: - return origin - - def record_message(self, recipient: smtp.Address, message: bytes) -> None: - self.messages.append((recipient, message)) - - def validateTo(self, user: smtp.User) -> Callable[[], interfaces.IMessageSMTP]: - return lambda: _DummyMessage(self, user) - - -@implementer(interfaces.IMessageSMTP) -class _DummyMessage: - """IMessageSMTP implementation which saves the message delivered to it - to the _DummyMessageDelivery object. 
- """ - - def __init__(self, delivery: _DummyMessageDelivery, user: smtp.User): - self._delivery = delivery - self._user = user - self._buffer: List[bytes] = [] - - def lineReceived(self, line: bytes) -> None: - self._buffer.append(line) - - def eomReceived(self) -> "defer.Deferred[bytes]": - message = b"\n".join(self._buffer) + b"\n" - self._delivery.record_message(self._user.dest, message) - return defer.succeed(b"saved") - - def connectionLost(self) -> None: - pass - - -class SendEmailHandlerTestCaseIPv4(HomeserverTestCase): - ip_class: Union[Type[IPv4Address], Type[IPv6Address]] = IPv4Address - - def setUp(self) -> None: - super().setUp() - self.reactor.lookups["localhost"] = "127.0.0.1" - - def test_send_email(self) -> None: - """Happy-path test that we can send email to a non-TLS server.""" - h = self.hs.get_send_email_handler() - d = ensureDeferred( - h.send_email( - "foo@bar.com", "test subject", "Tests", "HTML content", "Text content" - ) - ) - # there should be an attempt to connect to localhost:25 - self.assertEqual(len(self.reactor.tcpClients), 1) - (host, port, client_factory, _timeout, _bindAddress) = self.reactor.tcpClients[ - 0 - ] - self.assertEqual(host, self.reactor.lookups["localhost"]) - self.assertEqual(port, 25) - - # wire it up to an SMTP server - message_delivery = _DummyMessageDelivery() - server_protocol = smtp.ESMTP() - server_protocol.delivery = message_delivery - # make sure that the server uses the test reactor to set timeouts - server_protocol.callLater = self.reactor.callLater # type: ignore[assignment] - - client_protocol = client_factory.buildProtocol(None) - client_protocol.makeConnection(FakeTransport(server_protocol, self.reactor)) - server_protocol.makeConnection( - FakeTransport( - client_protocol, - self.reactor, - peer_address=self.ip_class( - "TCP", self.reactor.lookups["localhost"], 1234 - ), - ) - ) - - # the message should now get delivered - self.get_success(d, by=0.1) - - # check it arrived - self.assertEqual(len(message_delivery.messages), 1) - user, msg = message_delivery.messages.pop() - self.assertEqual(str(user), "foo@bar.com") - self.assertIn(b"Subject: test subject", msg) - - @patch( - "synapse.handlers.send_email.TLSMemoryBIOFactory", - TestingESMTPTLSClientFactory, - ) - @override_config( - { - "email": { - "notif_from": "noreply@test", - "force_tls": True, - }, - } - ) - def test_send_email_force_tls(self) -> None: - """Happy-path test that we can send email to an Implicit TLS server.""" - h = self.hs.get_send_email_handler() - d = ensureDeferred( - h.send_email( - "foo@bar.com", "test subject", "Tests", "HTML content", "Text content" - ) - ) - # there should be an attempt to connect to localhost:465 - self.assertEqual(len(self.reactor.tcpClients), 1) - ( - host, - port, - client_factory, - _timeout, - _bindAddress, - ) = self.reactor.tcpClients[0] - self.assertEqual(host, self.reactor.lookups["localhost"]) - self.assertEqual(port, 465) - # We need to make sure that TLS is happenning - self.assertIsInstance( - client_factory._wrappedFactory._testingContextFactory, - ClientTLSOptions, - ) - # And since we use endpoints, they go through reactor.connectTCP - # which works differently to connectSSL on the testing reactor - - # wire it up to an SMTP server - message_delivery = _DummyMessageDelivery() - server_protocol = smtp.ESMTP() - server_protocol.delivery = message_delivery - # make sure that the server uses the test reactor to set timeouts - server_protocol.callLater = self.reactor.callLater # type: ignore[assignment] - - 
client_protocol = client_factory.buildProtocol(None) - client_protocol.makeConnection(FakeTransport(server_protocol, self.reactor)) - server_protocol.makeConnection( - FakeTransport( - client_protocol, - self.reactor, - peer_address=self.ip_class( - "TCP", self.reactor.lookups["localhost"], 1234 - ), - ) - ) - - # the message should now get delivered - self.get_success(d, by=0.1) - - # check it arrived - self.assertEqual(len(message_delivery.messages), 1) - user, msg = message_delivery.messages.pop() - self.assertEqual(str(user), "foo@bar.com") - self.assertIn(b"Subject: test subject", msg) - - -class SendEmailHandlerTestCaseIPv6(SendEmailHandlerTestCaseIPv4): - ip_class = IPv6Address - - def setUp(self) -> None: - super().setUp() - self.reactor.lookups["localhost"] = "::1" diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 96da47f3b9..7144c58217 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py
@@ -18,39 +18,40 @@ # # import logging -from copy import deepcopy -from typing import Dict, List, Optional +from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple from unittest.mock import patch -from parameterized import parameterized +import attr +from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import ( - AccountDataTypes, - EventContentFields, EventTypes, JoinRules, Membership, - RoomTypes, ) from synapse.api.room_versions import RoomVersions -from synapse.events import StrippedStateEvent, make_event_from_dict -from synapse.events.snapshot import EventContext from synapse.handlers.sliding_sync import ( + MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER, + RoomsForUserType, RoomSyncConfig, StateValues, - _RoomMembershipForUser, + _required_state_changes, ) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import JsonDict, StreamToken, UserID -from synapse.types.handlers import SlidingSyncConfig +from synapse.types import JsonDict, StateMap, StreamToken, UserID, create_requester +from synapse.types.handlers.sliding_sync import PerConnectionState, SlidingSyncConfig +from synapse.types.state import StateFilter from synapse.util import Clock +from tests import unittest from tests.replication._base import BaseMultiWorkerStreamTestCase +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase +from tests.test_utils.event_injection import create_event from tests.unittest import HomeserverTestCase, TestCase logger = logging.getLogger(__name__) @@ -566,31 +567,39 @@ class RoomSyncConfigTestCase(TestCase): """ Combine A into B and B into A to make sure we get the same result. """ - # Since we're mutating these in place, make a copy for each of our trials - room_sync_config_a = deepcopy(a) - room_sync_config_b = deepcopy(b) - - # Combine B into A - room_sync_config_a.combine_room_sync_config(room_sync_config_b) - - self._assert_room_config_equal(room_sync_config_a, expected, "B into A") - - # Since we're mutating these in place, make a copy for each of our trials - room_sync_config_a = deepcopy(a) - room_sync_config_b = deepcopy(b) - - # Combine A into B - room_sync_config_b.combine_room_sync_config(room_sync_config_a) - - self._assert_room_config_equal(room_sync_config_b, expected, "A into B") - - -class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): + combined_config = a.combine_room_sync_config(b) + self._assert_room_config_equal(combined_config, expected, "B into A") + + combined_config = b.combine_room_sync_config(a) + self._assert_room_config_equal(combined_config, expected, "A into B") + + +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) +class ComputeInterestedRoomsTestCase(SlidingSyncBase): """ - Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it returns + Tests Sliding Sync handler `compute_interested_rooms()` to make sure it returns the correct list of room IDs. 
""" + # FIXME: We should refactor these tests to run against `compute_interested_rooms(...)` + # instead of just `get_room_membership_for_user_at_to_token(...)` which is only used + # in the fallback path (`_compute_interested_rooms_fallback(...)`). These scenarios do + # well to stress that logic and we shouldn't remove them just because we're removing + # the fallback path (tracked by https://github.com/element-hq/synapse/issues/17623). + servlets = [ admin.register_servlets, knock.register_servlets, @@ -609,6 +618,11 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.store = self.hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.persistence = persistence + + super().prepare(reactor, clock, hs) def test_no_rooms(self) -> None: """ @@ -619,15 +633,28 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=now_token, to_token=now_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - self.assertEqual(room_id_results.keys(), set()) + self.assertIncludes(room_id_results, set(), exact=True) def test_get_newly_joined_room(self) -> None: """ @@ -646,26 +673,48 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room_token, to_token=after_room_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms - self.assertEqual(room_id_results.keys(), {room_id}) + self.assertIncludes( + room_id_results, + {room_id}, + exact=True, + ) # It should be pointing to the join event (latest membership event in the # from/to range) self.assertEqual( - room_id_results[room_id].event_id, + interested_rooms.room_membership_for_user_map[room_id].event_id, join_response["event_id"], ) - self.assertEqual(room_id_results[room_id].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id].membership, + Membership.JOIN, + ) # We should be considered `newly_joined` because we joined during the token # range - self.assertEqual(room_id_results[room_id].newly_joined, True) - 
self.assertEqual(room_id_results[room_id].newly_left, False)
+        self.assertTrue(room_id in newly_joined)
+        self.assertTrue(room_id not in newly_left)
 
     def test_get_already_joined_room(self) -> None:
         """
@@ -681,25 +730,43 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
 
         after_room_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
-                UserID.from_string(user1_id),
+        interested_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.compute_interested_rooms(
+                SlidingSyncConfig(
+                    user=UserID.from_string(user1_id),
+                    requester=create_requester(user_id=user1_id),
+                    lists={
+                        "foo-list": SlidingSyncConfig.SlidingSyncList(
+                            ranges=[(0, 99)],
+                            required_state=[],
+                            timeline_limit=1,
+                        )
+                    },
+                    conn_id=None,
+                ),
+                PerConnectionState(),
                 from_token=after_room_token,
                 to_token=after_room_token,
             )
         )
+        room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+        newly_joined = interested_rooms.newly_joined_rooms
+        newly_left = interested_rooms.newly_left_rooms
 
-        self.assertEqual(room_id_results.keys(), {room_id})
+        self.assertIncludes(room_id_results, {room_id}, exact=True)
         # It should be pointing to the join event (latest membership event in the
         # from/to range)
         self.assertEqual(
-            room_id_results[room_id].event_id,
+            interested_rooms.room_membership_for_user_map[room_id].event_id,
             join_response["event_id"],
         )
-        self.assertEqual(room_id_results[room_id].membership, Membership.JOIN)
+        self.assertEqual(
+            interested_rooms.room_membership_for_user_map[room_id].membership,
+            Membership.JOIN,
+        )
         # We should *NOT* be `newly_joined` because we joined before the token range
-        self.assertEqual(room_id_results[room_id].newly_joined, False)
-        self.assertEqual(room_id_results[room_id].newly_left, False)
+        self.assertTrue(room_id not in newly_joined)
+        self.assertTrue(room_id not in newly_left)
 
     def test_get_invited_banned_knocked_room(self) -> None:
         """
@@ -755,48 +822,73 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
 
         after_room_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
-                UserID.from_string(user1_id),
+        interested_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.compute_interested_rooms(
+                SlidingSyncConfig(
+                    user=UserID.from_string(user1_id),
+                    requester=create_requester(user_id=user1_id),
+                    lists={
+                        "foo-list": SlidingSyncConfig.SlidingSyncList(
+                            ranges=[(0, 99)],
+                            required_state=[],
+                            timeline_limit=1,
+                        )
+                    },
+                    conn_id=None,
+                ),
+                PerConnectionState(),
                 from_token=before_room_token,
                 to_token=after_room_token,
             )
         )
+        room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+        newly_joined = interested_rooms.newly_joined_rooms
+        newly_left = interested_rooms.newly_left_rooms
 
         # Ensure that the invited, ban, and knock rooms show up
-        self.assertEqual(
-            room_id_results.keys(),
+        self.assertIncludes(
+            room_id_results,
             {
                 invited_room_id,
                 ban_room_id,
                 knock_room_id,
             },
+            exact=True,
         )
         # It should be pointing to the respective membership event (latest
         # membership event in the from/to range)
         self.assertEqual(
-            room_id_results[invited_room_id].event_id,
+            interested_rooms.room_membership_for_user_map[invited_room_id].event_id,
             invite_response["event_id"],
         )
-        self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE)
-        self.assertEqual(room_id_results[invited_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[invited_room_id].newly_left, False)
+        self.assertEqual(
+            interested_rooms.room_membership_for_user_map[invited_room_id].membership,
+            Membership.INVITE,
+        )
+        self.assertTrue(invited_room_id not in newly_joined)
+        self.assertTrue(invited_room_id not in newly_left)
 
         self.assertEqual(
-            room_id_results[ban_room_id].event_id,
+            interested_rooms.room_membership_for_user_map[ban_room_id].event_id,
             ban_response["event_id"],
         )
-        self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN)
-        self.assertEqual(room_id_results[ban_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[ban_room_id].newly_left, False)
+        self.assertEqual(
+            interested_rooms.room_membership_for_user_map[ban_room_id].membership,
+            Membership.BAN,
+        )
+        self.assertTrue(ban_room_id not in newly_joined)
+        self.assertTrue(ban_room_id not in newly_left)
 
         self.assertEqual(
-            room_id_results[knock_room_id].event_id,
+            interested_rooms.room_membership_for_user_map[knock_room_id].event_id,
             knock_room_membership_state_event.event_id,
         )
-        self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK)
-        self.assertEqual(room_id_results[knock_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[knock_room_id].newly_left, False)
+        self.assertEqual(
+            interested_rooms.room_membership_for_user_map[knock_room_id].membership,
+            Membership.KNOCK,
+        )
+        self.assertTrue(knock_room_id not in newly_joined)
+        self.assertTrue(knock_room_id not in newly_left)
 
     def test_get_kicked_room(self) -> None:
         """
@@ -827,27 +919,47 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
 
         after_kick_token = self.event_sources.get_current_token()
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
-                UserID.from_string(user1_id),
+        interested_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.compute_interested_rooms(
+                SlidingSyncConfig(
+                    user=UserID.from_string(user1_id),
+                    requester=create_requester(user_id=user1_id),
+                    lists={
+                        "foo-list": SlidingSyncConfig.SlidingSyncList(
+                            ranges=[(0, 99)],
+                            required_state=[],
+                            timeline_limit=1,
+                        )
+                    },
+                    conn_id=None,
+                ),
+                PerConnectionState(),
                 from_token=after_kick_token,
                 to_token=after_kick_token,
             )
         )
+        room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+        newly_joined = interested_rooms.newly_joined_rooms
+        newly_left = interested_rooms.newly_left_rooms
 
         # The kicked room should show up
-        self.assertEqual(room_id_results.keys(), {kick_room_id})
+        self.assertIncludes(room_id_results, {kick_room_id}, exact=True)
         # It should be pointing to the latest membership event in the from/to range
         self.assertEqual(
-            room_id_results[kick_room_id].event_id,
+            interested_rooms.room_membership_for_user_map[kick_room_id].event_id,
             kick_response["event_id"],
         )
-        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
-        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
+        self.assertEqual(
+            interested_rooms.room_membership_for_user_map[kick_room_id].membership,
+            Membership.LEAVE,
+        )
+        self.assertNotEqual(
+            interested_rooms.room_membership_for_user_map[kick_room_id].sender, user1_id
+        )
        # We should *NOT* be `newly_joined` because we were not joined at the time
        # of the `to_token`.
- self.assertEqual(room_id_results[kick_room_id].newly_joined, False) - self.assertEqual(room_id_results[kick_room_id].newly_left, False) + self.assertTrue(kick_room_id not in newly_joined) + self.assertTrue(kick_room_id not in newly_left) def test_forgotten_rooms(self) -> None: """ @@ -920,16 +1032,29 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): ) self.assertEqual(channel.code, 200, channel.result) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room_forgets, to_token=before_room_forgets, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) # We shouldn't see the room because it was forgotten - self.assertEqual(room_id_results.keys(), set()) + self.assertIncludes(room_id_results, set(), exact=True) def test_newly_left_rooms(self) -> None: """ @@ -940,7 +1065,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave before we calculate the `from_token` room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) - leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok) + _leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -950,34 +1075,55 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room2_token = self.event_sources.get_current_token() - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room2_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms - self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) - - self.assertEqual( - room_id_results[room_id1].event_id, - leave_response1["event_id"], + # `room_id1` should not show up because it was left before the token range. + # `room_id2` should show up because it is `newly_left` within the token range. 
+ self.assertIncludes( + room_id_results, + {room_id2}, + exact=True, + message="Corresponding map to disambiguate the opaque room IDs: " + + str( + { + "room_id1": room_id1, + "room_id2": room_id2, + } + ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) - # We should *NOT* be `newly_joined` or `newly_left` because that happened before - # the from/to range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) self.assertEqual( - room_id_results[room_id2].event_id, + interested_rooms.room_membership_for_user_map[room_id2].event_id, leave_response2["event_id"], ) - self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id2].membership, + Membership.LEAVE, + ) # We should *NOT* be `newly_joined` because we are instead `newly_left` - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, True) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 in newly_left) def test_no_joins_after_to_token(self) -> None: """ @@ -1000,24 +1146,42 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) self.helper.join(room_id2, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response1["event_id"], ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_during_range_and_left_room_after_to_token(self) -> None: """ @@ -1040,20 +1204,35 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave the room after we already have our tokens leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + 
SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # We should still see the room because we were joined during the # from_token/to_token time period. - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1063,10 +1242,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_before_range_and_left_room_after_to_token(self) -> None: """ @@ -1087,19 +1269,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave the room after we already have our tokens leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # We should still see the room because we were joined before the `from_token` - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1109,10 +1306,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should *NOT* be `newly_joined` because we joined before the token range - 
self.assertEqual(room_id_results[room_id1].newly_joined, False)
-        self.assertEqual(room_id_results[room_id1].newly_left, False)
+        self.assertTrue(room_id1 not in newly_joined)
+        self.assertTrue(room_id1 not in newly_left)
 
     def test_kicked_before_range_and_left_after_to_token(self) -> None:
         """
@@ -1151,19 +1351,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
         join_response2 = self.helper.join(kick_room_id, user1_id, tok=user1_tok)
         leave_response = self.helper.leave(kick_room_id, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
-                UserID.from_string(user1_id),
+        interested_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.compute_interested_rooms(
+                SlidingSyncConfig(
+                    user=UserID.from_string(user1_id),
+                    requester=create_requester(user_id=user1_id),
+                    lists={
+                        "foo-list": SlidingSyncConfig.SlidingSyncList(
+                            ranges=[(0, 99)],
+                            required_state=[],
+                            timeline_limit=1,
+                        )
+                    },
+                    conn_id=None,
+                ),
+                PerConnectionState(),
                 from_token=after_kick_token,
                 to_token=after_kick_token,
             )
         )
+        room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+        newly_joined = interested_rooms.newly_joined_rooms
+        newly_left = interested_rooms.newly_left_rooms
 
        # The kicked room should show up
-        self.assertEqual(room_id_results.keys(), {kick_room_id})
+        self.assertIncludes(room_id_results, {kick_room_id}, exact=True)
         # It should be pointing to the latest membership event in the from/to range
         self.assertEqual(
-            room_id_results[kick_room_id].event_id,
+            interested_rooms.room_membership_for_user_map[kick_room_id].event_id,
             kick_response["event_id"],
             "Corresponding map to disambiguate the opaque event IDs: "
             + str(
@@ -1175,11 +1390,16 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
                 }
             ),
         )
-        self.assertEqual(room_id_results[kick_room_id].membership, Membership.LEAVE)
-        self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id)
+        self.assertEqual(
+            interested_rooms.room_membership_for_user_map[kick_room_id].membership,
+            Membership.LEAVE,
+        )
+        self.assertNotEqual(
+            interested_rooms.room_membership_for_user_map[kick_room_id].sender, user1_id
+        )
         # We should *NOT* be `newly_joined` because we were kicked
-        self.assertEqual(room_id_results[kick_room_id].newly_joined, False)
-        self.assertEqual(room_id_results[kick_room_id].newly_left, False)
+        self.assertTrue(kick_room_id not in newly_joined)
+        self.assertTrue(kick_room_id not in newly_left)
 
     def test_newly_left_during_range_and_join_leave_after_to_token(self) -> None:
         """
@@ -1207,19 +1427,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase):
         join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok)
         leave_response2 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
 
-        room_id_results = self.get_success(
-            self.sliding_sync_handler.get_room_membership_for_user_at_to_token(
-                UserID.from_string(user1_id),
+        interested_rooms = self.get_success(
+            self.sliding_sync_handler.room_lists.compute_interested_rooms(
+                SlidingSyncConfig(
+                    user=UserID.from_string(user1_id),
+                    requester=create_requester(user_id=user1_id),
+                    lists={
+                        "foo-list": SlidingSyncConfig.SlidingSyncList(
+                            ranges=[(0, 99)],
+                            required_state=[],
+                            timeline_limit=1,
+                        )
+                    },
+                    conn_id=None,
+                ),
+                PerConnectionState(),
                 from_token=before_room1_token,
                 to_token=after_room1_token,
             )
         )
+        room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids)
+        newly_joined =
interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should still show up because it's newly_left during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, leave_response1["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1231,11 +1466,14 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.LEAVE, + ) # We should *NOT* be `newly_joined` because we are actually `newly_left` during # the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, True) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 in newly_left) def test_newly_left_during_range_and_join_after_to_token(self) -> None: """ @@ -1262,19 +1500,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join the room after we already have our tokens join_response2 = self.helper.join(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should still show up because it's newly_left during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, leave_response1["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1285,11 +1538,14 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.LEAVE, + ) # We should *NOT* be `newly_joined` because we are actually `newly_left` during # the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, True) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 in newly_left) def test_no_from_token(self) -> None: """ @@ -1314,47 +1570,53 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join and leave the room2 before the `to_token` self.helper.join(room_id2, user1_id, 
tok=user1_tok) - leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) + _leave_response2 = self.helper.leave(room_id2, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() # Join the room2 after we already have our tokens self.helper.join(room_id2, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=None, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Only rooms we were joined to before the `to_token` should show up - self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # Room1 # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response1["event_id"], ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) - # We should *NOT* be `newly_joined`/`newly_left` because there is no - # `from_token` to define a "live" range to compare against - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) - - # Room2 - # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id2].event_id, - leave_response2["event_id"], + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, ) - self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) # We should *NOT* be `newly_joined`/`newly_left` because there is no # `from_token` to define a "live" range to compare against - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_from_token_ahead_of_to_token(self) -> None: """ @@ -1378,7 +1640,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join and leave the room2 before `to_token` _join_room2_response1 = self.helper.join(room_id2, user1_id, tok=user1_tok) - leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok) + _leave_room2_response1 = self.helper.leave(room_id2, user1_id, tok=user1_tok) # Note: These are purposely swapped. 
The `from_token` should come after # the `to_token` in this test @@ -1403,54 +1665,69 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Join the room4 after we already have our tokens self.helper.join(room_id4, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=from_token, to_token=to_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # In the "current" state snapshot, we're joined to all of the rooms but in the # from/to token range... self.assertIncludes( - room_id_results.keys(), + room_id_results, { # Included because we were joined before both tokens room_id1, - # Included because we had membership before the to_token - room_id2, + # Excluded because we left before the `from_token` and `to_token` + # room_id2, # Excluded because we joined after the `to_token` # room_id3, # Excluded because we joined after the `to_token` # room_id4, }, exact=True, + message="Corresponding map to disambiguate the opaque room IDs: " + + str( + { + "room_id1": room_id1, + "room_id2": room_id2, + "room_id3": room_id3, + "room_id4": room_id4, + } + ), ) # Room1 # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_room1_response1["event_id"], ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) - # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1` - # before either of the tokens - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) - - # Room2 - # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id2].event_id, - leave_room2_response1["event_id"], + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, ) - self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) - # We should *NOT* be `newly_joined`/`newly_left` because we joined and left - # `room1` before either of the tokens - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + # We should *NOT* be `newly_joined`/`newly_left` because we joined `room1` + # before either of the tokens + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_leave_before_range_and_join_leave_after_to_token(self) -> None: """ @@ -1468,7 +1745,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) # Join and leave the room before the from/to range self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + self.helper.leave(room_id1, user1_id, 
tok=user1_tok) after_room1_token = self.event_sources.get_current_token() @@ -1476,25 +1753,28 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): self.helper.join(room_id1, user1_id, tok=user1_tok) self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - self.assertEqual(room_id_results.keys(), {room_id1}) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( - room_id_results[room_id1].event_id, - leave_response["event_id"], - ) - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) - # We should *NOT* be `newly_joined`/`newly_left` because we joined and left - # `room1` before either of the tokens - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertIncludes(room_id_results, set(), exact=True) def test_leave_before_range_and_join_after_to_token(self) -> None: """ @@ -1512,32 +1792,35 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) # Join and leave the room before the from/to range self.helper.join(room_id1, user1_id, tok=user1_tok) - leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + self.helper.leave(room_id1, user1_id, tok=user1_tok) after_room1_token = self.event_sources.get_current_token() # Join the room after we already have our tokens self.helper.join(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) - self.assertEqual(room_id_results.keys(), {room_id1}) - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( - room_id_results[room_id1].event_id, - leave_response["event_id"], - ) - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) - # We should *NOT* be `newly_joined`/`newly_left` because we joined and left - # `room1` before either of the tokens - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertIncludes(room_id_results, set(), exact=True) def test_join_leave_multiple_times_during_range_and_after_to_token( self, @@ -1569,19 +1852,34 @@ class 
GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because it was newly_left and joined during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response2["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1595,12 +1893,15 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) + self.assertTrue(room_id1 in newly_joined) # We should *NOT* be `newly_left` because we joined during the token range and # was still joined at the end of the range - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_left) def test_join_leave_multiple_times_before_range_and_after_to_token( self, @@ -1631,19 +1932,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_response3 = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response3 = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were joined before the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, 
+ interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response2["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1657,10 +1973,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_invite_before_range_and_join_leave_after_to_token( self, @@ -1690,19 +2009,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): join_respsonse = self.helper.join(room_id1, user1_id, tok=user1_tok) leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were invited before the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, invite_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1713,11 +2047,14 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.INVITE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.INVITE, + ) # We should *NOT* be `newly_joined` because we were only invited before the # token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_join_and_display_name_changes_in_token_range( self, @@ -1764,19 +2101,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): tok=user1_tok, ) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + 
conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were joined during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, displayname_change_during_token_range_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1791,10 +2143,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_in_token_range( self, @@ -1829,19 +2184,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_change1_token = self.event_sources.get_current_token() - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_change1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were joined during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, displayname_change_during_token_range_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1853,10 +2223,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_before_and_after_token_range( self, @@ -1901,19 +2274,34 @@ class 
GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): tok=user1_tok, ) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were joined before the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, displayname_change_before_token_range_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -1928,18 +2316,22 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) - def test_display_name_changes_leave_after_token_range( + def test_newly_joined_display_name_changes_leave_after_token_range( self, ) -> None: """ Test that we point to the correct membership event within the from/to range even - if there are multiple `join` membership events in a row indicating - `displayname`/`avatar_url` updates and we leave after the `to_token`. + if we are `newly_joined` and there are multiple `join` membership events in a + row indicating `displayname`/`avatar_url` updates and we leave after the + `to_token`. See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method. """ @@ -1954,6 +2346,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # leave and can still re-join. 
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + # Update the displayname during the token range displayname_change_during_token_range_response = self.helper.send_state( room_id1, @@ -1983,19 +2376,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave after the token self.helper.leave(room_id1, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were joined during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, displayname_change_during_token_range_response["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -2010,10 +2418,117 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) + + def test_display_name_changes_leave_after_token_range( + self, + ) -> None: + """ + Test that we point to the correct membership event within the from/to range even + if there are multiple `join` membership events in a row indicating + `displayname`/`avatar_url` updates and we leave after the `to_token`. + + See condition "1a)" comments in the `get_room_membership_for_user_at_to_token()` method. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + _before_room1_token = self.event_sources.get_current_token() + + # We create the room with user2 so the room isn't left with no members when we + # leave and can still re-join. 
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) + join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + + after_join_token = self.event_sources.get_current_token() + + # Update the displayname during the token range + displayname_change_during_token_range_response = self.helper.send_state( + room_id1, + event_type=EventTypes.Member, + state_key=user1_id, + body={ + "membership": Membership.JOIN, + "displayname": "displayname during token range", + }, + tok=user1_tok, + ) + + after_display_name_change_token = self.event_sources.get_current_token() + + # Update the displayname after the token range + displayname_change_after_token_range_response = self.helper.send_state( + room_id1, + event_type=EventTypes.Member, + state_key=user1_id, + body={ + "membership": Membership.JOIN, + "displayname": "displayname after token range", + }, + tok=user1_tok, + ) + + # Leave after the token + self.helper.leave(room_id1, user1_id, tok=user1_tok) + + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), + from_token=after_join_token, + to_token=after_display_name_change_token, + ) + ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms + + # Room should show up because we were joined during the from/to range + self.assertIncludes(room_id_results, {room_id1}, exact=True) + # It should be pointing to the latest membership event in the from/to range + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].event_id, + displayname_change_during_token_range_response["event_id"], + "Corresponding map to disambiguate the opaque event IDs: " + + str( + { + "join_response": join_response["event_id"], + "displayname_change_during_token_range_response": displayname_change_during_token_range_response[ + "event_id" + ], + "displayname_change_after_token_range_response": displayname_change_after_token_range_response[ + "event_id" + ], + } + ), + ) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) + # We only changed our display name during the token range so we shouldn't be + # considered `newly_joined` or `newly_left` + self.assertTrue(room_id1 not in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_display_name_changes_join_after_token_range( self, @@ -2051,16 +2566,29 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): tok=user1_tok, ) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) # Room shouldn't 
show up because we joined after the from/to range - self.assertEqual(room_id_results.keys(), set()) + self.assertIncludes(room_id_results, set(), exact=True) def test_newly_joined_with_leave_join_in_token_range( self, @@ -2087,26 +2615,44 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_more_changes_token = self.event_sources.get_current_token() - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=after_room1_token, to_token=after_more_changes_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because we were joined during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_response2["event_id"], ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be considered `newly_joined` because there is some non-join event in # between our latest join event. 
- self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_newly_joined_only_joins_during_token_range( self, @@ -2152,19 +2698,34 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): after_room1_token = self.event_sources.get_current_token() - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room1_token, to_token=after_room1_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room should show up because it was newly_left and joined during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1}) + self.assertIncludes(room_id_results, {room_id1}, exact=True) # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, displayname_change_during_token_range_response2["event_id"], "Corresponding map to disambiguate the opaque event IDs: " + str( @@ -2179,10 +2740,13 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): } ), ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we first joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) def test_multiple_rooms_are_not_confused( self, @@ -2205,7 +2769,7 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Invited and left the room before the token self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) - leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + _leave_room1_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) # Invited to room2 invite_room2_response = self.helper.invite( room_id2, src=user2_id, targ=user1_id, tok=user2_tok @@ -2228,61 +2792,71 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): # Leave room3 self.helper.leave(room_id3, user1_id, tok=user1_tok) - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_room3_token, 
to_token=after_room3_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms - self.assertEqual( - room_id_results.keys(), + self.assertIncludes( + room_id_results, { - # Left before the from/to range - room_id1, + # Excluded because we left before the from/to range + # room_id1, # Invited before the from/to range room_id2, # `newly_left` during the from/to range room_id3, }, + exact=True, ) - # Room1 - # It should be pointing to the latest membership event in the from/to range - self.assertEqual( - room_id_results[room_id1].event_id, - leave_room1_response["event_id"], - ) - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) - # We should *NOT* be `newly_joined`/`newly_left` because we were invited and left - # before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) - self.assertEqual(room_id_results[room_id1].newly_left, False) - # Room2 # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id2].event_id, + interested_rooms.room_membership_for_user_map[room_id2].event_id, invite_room2_response["event_id"], ) - self.assertEqual(room_id_results[room_id2].membership, Membership.INVITE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id2].membership, + Membership.INVITE, + ) # We should *NOT* be `newly_joined`/`newly_left` because we were invited before # the token range - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 not in newly_left) # Room3 # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id3].event_id, + interested_rooms.room_membership_for_user_map[room_id3].event_id, leave_room3_response["event_id"], ) - self.assertEqual(room_id_results[room_id3].membership, Membership.LEAVE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id3].membership, + Membership.LEAVE, + ) # We should be `newly_left` because we were invited and left during # the token range - self.assertEqual(room_id_results[room_id3].newly_joined, False) - self.assertEqual(room_id_results[room_id3].newly_left, True) + self.assertTrue(room_id3 not in newly_joined) + self.assertTrue(room_id3 in newly_left) def test_state_reset(self) -> None: """ @@ -2295,7 +2869,16 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): user2_tok = self.login(user2_id, "pass") # The room where the state reset will happen - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + room_id1 = self.helper.create_room_as( + user2_id, + is_public=True, + tok=user2_tok, + ) + # Create a dummy event for us to point back to for the state reset + dummy_event_response = self.helper.send(room_id1, "test", tok=user2_tok) + dummy_event_id = dummy_event_response["event_id"] + + # Join after the dummy event join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) # Join another room so we don't hit the short-circuit and return early if they @@ -2305,95 +2888,106 @@ class GetRoomMembershipForUserAtToTokenTestCase(HomeserverTestCase): before_reset_token = self.event_sources.get_current_token() - # Send another state event to make a position for the state reset to happen at - dummy_state_response = 
self.helper.send_state( - room_id1, - event_type="foobarbaz", - state_key="", - body={"foo": "bar"}, - tok=user2_tok, - ) - dummy_state_pos = self.get_success( - self.store.get_position_for_event(dummy_state_response["event_id"]) - ) - - # Mock a state reset removing the membership for user1 in the current state - self.get_success( - self.store.db_pool.simple_delete( - table="current_state_events", - keyvalues={ - "room_id": room_id1, - "type": EventTypes.Member, - "state_key": user1_id, - }, - desc="state reset user in current_state_events", + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[dummy_event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=room_id1, + room_version=self.get_success(self.store.get_room_version_id(room_id1)), ) ) - self.get_success( - self.store.db_pool.simple_delete( - table="local_current_membership", - keyvalues={ - "room_id": room_id1, - "user_id": user1_id, - }, - desc="state reset user in local_current_membership", - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - table="current_state_delta_stream", - values={ - "stream_id": dummy_state_pos.stream, - "room_id": room_id1, - "type": EventTypes.Member, - "state_key": user1_id, - "event_id": None, - "prev_event_id": join_response1["event_id"], - "instance_name": dummy_state_pos.instance_name, - }, - desc="state reset user in current_state_delta_stream", - ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) ) - # Manually bust the cache since we we're just manually messing with the database - # and not causing an actual state reset. - self.store._membership_stream_cache.entity_has_changed( - user1_id, dummy_state_pos.stream - ) + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(room_id1)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) after_reset_token = self.event_sources.get_current_token() # The function under test - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_reset_token, to_token=after_reset_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms # Room1 should show up because it was `newly_left` via state reset during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + self.assertIncludes(room_id_results, {room_id1, room_id2}, exact=True) # It should be pointing to no event because we were removed from the room # without a corresponding leave event self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, None, + "Corresponding map to disambiguate the opaque event IDs: " + + str( + { + "join_response1": join_response1["event_id"], + } + ), ) # State reset 
caused us to leave the room and there is no corresponding leave event - self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.LEAVE, + ) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertTrue(room_id1 not in newly_joined) # We should be `newly_left` because we were removed via state reset during the from/to range - self.assertEqual(room_id_results[room_id1].newly_left, True) - - -class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCase): + self.assertTrue(room_id1 in newly_left) + + +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) +class ComputeInterestedRoomsShardTestCase( + BaseMultiWorkerStreamTestCase, SlidingSyncBase +): """ - Tests Sliding Sync handler `get_room_membership_for_user_at_to_token()` to make sure it works with + Tests Sliding Sync handler `compute_interested_rooms()` to make sure it works with sharded event stream_writers enabled """ + # FIXME: We should refactor these tests to run against `compute_interested_rooms(...)` + # instead of just `get_room_membership_for_user_at_to_token(...)` which is only used + # in the fallback path (`_compute_interested_rooms_fallback(...)`). These scenarios are + # good at stressing that logic, and we shouldn't remove them just because we're removing + # the fallback path (tracked by https://github.com/element-hq/synapse/issues/17623).
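As context for the `@parameterized_class` usage above: the decorator generates one copy of the test class per parameter tuple, and `class_name_func` controls the generated class names (here a `_new` and a `_fallback` variant). A minimal standalone sketch of the same pattern — `ExampleShardTestCase` and its test are hypothetical:

    import unittest

    from parameterized import parameterized_class

    @parameterized_class(
        ("use_new_tables",),
        [
            (True,),
            (False,),
        ],
        class_name_func=lambda cls, num, params_dict: (
            f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}"
        ),
    )
    class ExampleShardTestCase(unittest.TestCase):
        # Injected by parameterized_class; the generated classes are
        # ExampleShardTestCase_new (True) and ExampleShardTestCase_fallback (False).
        use_new_tables: bool

        def test_flag_is_a_bool(self) -> None:
            self.assertIsInstance(self.use_new_tables, bool)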
+ servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2488,7 +3082,7 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok) # Leave room2 - leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok) + _leave_room2_response = self.helper.leave(room_id2, user1_id, tok=user1_tok) join_response3 = self.helper.join(room_id3, user1_id, tok=user1_tok) # Leave room3 self.helper.leave(room_id3, user1_id, tok=user1_tok) @@ -2578,60 +3172,77 @@ class GetRoomMembershipForUserAtToTokenShardTestCase(BaseMultiWorkerStreamTestCa self.get_success(actx.__aexit__(None, None, None)) # The function under test - room_id_results = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - UserID.from_string(user1_id), + interested_rooms = self.get_success( + self.sliding_sync_handler.room_lists.compute_interested_rooms( + SlidingSyncConfig( + user=UserID.from_string(user1_id), + requester=create_requester(user_id=user1_id), + lists={ + "foo-list": SlidingSyncConfig.SlidingSyncList( + ranges=[(0, 99)], + required_state=[], + timeline_limit=1, + ) + }, + conn_id=None, + ), + PerConnectionState(), from_token=before_stuck_activity_token, to_token=stuck_activity_token, ) ) + room_id_results = set(interested_rooms.lists["foo-list"].ops[0].room_ids) + newly_joined = interested_rooms.newly_joined_rooms + newly_left = interested_rooms.newly_left_rooms - self.assertEqual( - room_id_results.keys(), + self.assertIncludes( + room_id_results, { room_id1, - room_id2, + # Excluded because we left before the from/to range and the second join + # event happened while worker2 was stuck and technically occurs after + # the `stuck_activity_token`. + # room_id2, room_id3, }, + exact=True, + message="Corresponding map to disambiguate the opaque room IDs: " + + str( + { + "room_id1": room_id1, + "room_id2": room_id2, + "room_id3": room_id3, + } + ), ) # Room1 # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id1].event_id, + interested_rooms.room_membership_for_user_map[room_id1].event_id, join_room1_response["event_id"], ) - self.assertEqual(room_id_results[room_id1].membership, Membership.JOIN) - # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id1].newly_joined, True) - self.assertEqual(room_id_results[room_id1].newly_left, False) - - # Room2 - # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id2].event_id, - leave_room2_response["event_id"], - ) - self.assertEqual(room_id_results[room_id2].membership, Membership.LEAVE) - # room_id2 should *NOT* be considered `newly_left` because we left before the - # from/to range and the join event during the range happened while worker2 was - # stuck. This means that from the perspective of the master, where the - # `stuck_activity_token` is generated, the stream position for worker2 wasn't - # advanced to the join yet. Looking at the `instance_map`, the join technically - # comes after `stuck_activity_token`. 
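The removed comment above captures the multi-writer subtlety being tested: a token taken while one stream writer is stuck records that writer's old position, so an event the stuck writer persists later compares as coming *after* the token. A self-contained toy model of that `instance_map` comparison (illustrative only, not Synapse's real `RoomStreamToken`):

    from dataclasses import dataclass, field
    from typing import Dict

    @dataclass(frozen=True)
    class ToyStreamToken:
        # Default stream position, plus per-writer positions for any writer
        # that has diverged from it (the `instance_map` idea).
        stream: int
        instance_map: Dict[str, int] = field(default_factory=dict)

        def includes(self, instance_name: str, position: int) -> bool:
            # An event falls at-or-before the token only if its writer's
            # recorded position has already advanced past it.
            return position <= self.instance_map.get(instance_name, self.stream)

    # worker2 was stuck at position 7 when the token was taken, so a join it
    # later persists at position 9 falls *after* the token...
    token = ToyStreamToken(stream=10, instance_map={"worker2": 7})
    assert not token.includes("worker2", 9)
    # ...while position 9 from a writer tracked by the default is included.
    assert token.includes("worker1", 9)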
- self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, False) + interested_rooms.room_membership_for_user_map[room_id1].membership, + Membership.JOIN, + ) + # We should be `newly_joined` because we joined during the token range + self.assertTrue(room_id1 in newly_joined) + self.assertTrue(room_id1 not in newly_left) # Room3 # It should be pointing to the latest membership event in the from/to range self.assertEqual( - room_id_results[room_id3].event_id, + interested_rooms.room_membership_for_user_map[room_id3].event_id, join_on_worker3_response["event_id"], ) - self.assertEqual(room_id_results[room_id3].membership, Membership.JOIN) + self.assertEqual( + interested_rooms.room_membership_for_user_map[room_id3].membership, + Membership.JOIN, + ) # We should be `newly_joined` because we joined during the token range - self.assertEqual(room_id_results[room_id3].newly_joined, True) - self.assertEqual(room_id_results[room_id3].newly_left, False) + self.assertTrue(room_id3 in newly_joined) + self.assertTrue(room_id3 not in newly_left) class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): @@ -2658,31 +3269,35 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self.store = self.hs.get_datastores().main self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.persistence = persistence def _get_sync_room_ids_for_user( self, user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: + ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ - room_membership_for_user_map = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + room_membership_for_user_map, newly_joined, newly_left = self.get_success( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, to_token=to_token, ) ) filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms_relevant_for_sync( + self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left, ) ) - return filtered_sync_room_map + return filtered_sync_room_map, newly_joined, newly_left def test_no_rooms(self) -> None: """ @@ -2693,13 +3308,13 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): now_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=now_token, to_token=now_token, ) - self.assertEqual(room_id_results.keys(), set()) + self.assertIncludes(room_id_results.keys(), set(), exact=True) def test_basic_rooms(self) -> None: """ @@ -2758,14 +3373,14 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_room_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=before_room_token, to_token=after_room_token, ) # Ensure that the invited, ban, and knock rooms show up - self.assertEqual( + self.assertIncludes( room_id_results.keys(), { 
join_room_id, @@ -2773,6 +3388,7 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): ban_room_id, knock_room_id, }, + exact=True, ) # It should be pointing to the respective membership event (latest # membership event in the from/to range) @@ -2781,32 +3397,32 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self.assertEqual( join_response["event_id"], ) self.assertEqual(room_id_results[join_room_id].membership, Membership.JOIN) - self.assertEqual(room_id_results[join_room_id].newly_joined, True) - self.assertEqual(room_id_results[join_room_id].newly_left, False) + self.assertTrue(join_room_id in newly_joined) + self.assertTrue(join_room_id not in newly_left) self.assertEqual( room_id_results[invited_room_id].event_id, invite_response["event_id"], ) self.assertEqual(room_id_results[invited_room_id].membership, Membership.INVITE) - self.assertEqual(room_id_results[invited_room_id].newly_joined, False) - self.assertEqual(room_id_results[invited_room_id].newly_left, False) + self.assertTrue(invited_room_id not in newly_joined) + self.assertTrue(invited_room_id not in newly_left) self.assertEqual( room_id_results[ban_room_id].event_id, ban_response["event_id"], ) self.assertEqual(room_id_results[ban_room_id].membership, Membership.BAN) - self.assertEqual(room_id_results[ban_room_id].newly_joined, False) - self.assertEqual(room_id_results[ban_room_id].newly_left, False) + self.assertTrue(ban_room_id not in newly_joined) + self.assertTrue(ban_room_id not in newly_left) self.assertEqual( room_id_results[knock_room_id].event_id, knock_room_membership_state_event.event_id, ) self.assertEqual(room_id_results[knock_room_id].membership, Membership.KNOCK) - self.assertEqual(room_id_results[knock_room_id].newly_joined, False) - self.assertEqual(room_id_results[knock_room_id].newly_left, False) + self.assertTrue(knock_room_id not in newly_joined) + self.assertTrue(knock_room_id not in newly_left) def test_only_newly_left_rooms_show_up(self) -> None: """ @@ -2829,21 +3445,21 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_room2_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=after_room1_token, to_token=after_room2_token, ) # Only the `newly_left` room should show up - self.assertEqual(room_id_results.keys(), {room_id2}) + self.assertIncludes(room_id_results.keys(), {room_id2}, exact=True) self.assertEqual( room_id_results[room_id2].event_id, _leave_response2["event_id"], ) # We should *NOT* be `newly_joined` because we are instead `newly_left` - self.assertEqual(room_id_results[room_id2].newly_joined, False) - self.assertEqual(room_id_results[room_id2].newly_left, True) + self.assertTrue(room_id2 not in newly_joined) + self.assertTrue(room_id2 in newly_left) def test_get_kicked_room(self) -> None: """ @@ -2874,14 +3490,14 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): after_kick_token = self.event_sources.get_current_token() - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=after_kick_token, to_token=after_kick_token, ) # The kicked room should show up - self.assertEqual(room_id_results.keys(), {kick_room_id}) + self.assertIncludes(room_id_results.keys(), {kick_room_id}, exact=True) # It should be pointing to the latest membership event in the from/to range 
self.assertEqual( room_id_results[kick_room_id].event_id, @@ -2891,8 +3507,8 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): self.assertNotEqual(room_id_results[kick_room_id].sender, user1_id) # We should *NOT* be `newly_joined` because we were not joined at the time # of the `to_token`. - self.assertEqual(room_id_results[kick_room_id].newly_joined, False) - self.assertEqual(room_id_results[kick_room_id].newly_left, False) + self.assertTrue(kick_room_id not in newly_joined) + self.assertTrue(kick_room_id not in newly_left) def test_state_reset(self) -> None: """ @@ -2905,8 +3521,17 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): user2_tok = self.login(user2_id, "pass") # The room where the state reset will happen - room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok) + room_id1 = self.helper.create_room_as( + user2_id, + is_public=True, + tok=user2_tok, + ) + # Create a dummy event for us to point back to for the state reset + dummy_event_response = self.helper.send(room_id1, "test", tok=user2_tok) + dummy_event_id = dummy_event_response["event_id"] + + # Join after the dummy event + self.helper.join(room_id1, user1_id, tok=user1_tok) # Join another room so we don't hit the short-circuit and return early if they # have no room membership @@ -2915,73 +3540,38 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): before_reset_token = self.event_sources.get_current_token() - # Send another state event to make a position for the state reset to happen at - dummy_state_response = self.helper.send_state( - room_id1, - event_type="foobarbaz", - state_key="", - body={"foo": "bar"}, - tok=user2_tok, - ) - dummy_state_pos = self.get_success( - self.store.get_position_for_event(dummy_state_response["event_id"]) - ) - - # Mock a state reset removing the membership for user1 in the current state - self.get_success( - self.store.db_pool.simple_delete( - table="current_state_events", - keyvalues={ - "room_id": room_id1, - "type": EventTypes.Member, - "state_key": user1_id, - }, - desc="state reset user in current_state_events", + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[dummy_event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=room_id1, + room_version=self.get_success(self.store.get_room_version_id(room_id1)), ) ) - self.get_success( - self.store.db_pool.simple_delete( - table="local_current_membership", - keyvalues={ - "room_id": room_id1, - "user_id": user1_id, - }, - desc="state reset user in local_current_membership", - ) - ) - self.get_success( - self.store.db_pool.simple_insert( - table="current_state_delta_stream", - values={ - "stream_id": dummy_state_pos.stream, - "room_id": room_id1, - "type": EventTypes.Member, - "state_key": user1_id, - "event_id": None, - "prev_event_id": join_response1["event_id"], - "instance_name": dummy_state_pos.instance_name, - }, - desc="state reset user in current_state_delta_stream", - ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) ) - # Manually bust the cache since we we're just manually messing with the database
- self.store._membership_stream_cache.entity_has_changed( - user1_id, dummy_state_pos.stream - ) + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(room_id1)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) after_reset_token = self.event_sources.get_current_token() # The function under test - room_id_results = self._get_sync_room_ids_for_user( + room_id_results, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=before_reset_token, to_token=after_reset_token, ) # Room1 should show up because it was `newly_left` via state reset during the from/to range - self.assertEqual(room_id_results.keys(), {room_id1, room_id2}) + self.assertIncludes(room_id_results.keys(), {room_id1, room_id2}, exact=True) # It should be pointing to no event because we were removed from the room # without a corresponding leave event self.assertEqual( @@ -2991,1345 +3581,9 @@ class FilterRoomsRelevantForSyncTestCase(HomeserverTestCase): # State reset caused us to leave the room and there is no corresponding leave event self.assertEqual(room_id_results[room_id1].membership, Membership.LEAVE) # We should *NOT* be `newly_joined` because we joined before the token range - self.assertEqual(room_id_results[room_id1].newly_joined, False) + self.assertTrue(room_id1 not in newly_joined) # We should be `newly_left` because we were removed via state reset during the from/to range - self.assertEqual(room_id_results[room_id1].newly_left, True) - - -class FilterRoomsTestCase(HomeserverTestCase): - """ - Tests Sliding Sync handler `filter_rooms()` to make sure it includes/excludes rooms - correctly. - """ - - servlets = [ - admin.register_servlets, - knock.register_servlets, - login.register_servlets, - room.register_servlets, - ] - - def default_config(self) -> JsonDict: - config = super().default_config() - # Enable sliding sync - config["experimental_features"] = {"msc3575_enabled": True} - return config - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.sliding_sync_handler = self.hs.get_sliding_sync_handler() - self.store = self.hs.get_datastores().main - self.event_sources = hs.get_event_sources() - - def _get_sync_room_ids_for_user( - self, - user: UserID, - to_token: StreamToken, - from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: - """ - Get the rooms the user should be syncing with - """ - room_membership_for_user_map = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( - user=user, - from_token=from_token, - to_token=to_token, - ) - ) - filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms_relevant_for_sync( - user=user, - room_membership_for_user_map=room_membership_for_user_map, - ) - ) - - return filtered_sync_room_map - - def _create_dm_room( - self, - inviter_user_id: str, - inviter_tok: str, - invitee_user_id: str, - invitee_tok: str, - ) -> str: - """ - Helper to create a DM room as the "inviter" and invite the "invitee" user to the room. The - "invitee" user also will join the room. The `m.direct` account data will be set - for both users. 
- """ - - # Create a room and send an invite the other user - room_id = self.helper.create_room_as( - inviter_user_id, - is_public=False, - tok=inviter_tok, - ) - self.helper.invite( - room_id, - src=inviter_user_id, - targ=invitee_user_id, - tok=inviter_tok, - extra_data={"is_direct": True}, - ) - # Person that was invited joins the room - self.helper.join(room_id, invitee_user_id, tok=invitee_tok) - - # Mimic the client setting the room as a direct message in the global account - # data - self.get_success( - self.store.add_account_data_for_user( - invitee_user_id, - AccountDataTypes.DIRECT, - {inviter_user_id: [room_id]}, - ) - ) - self.get_success( - self.store.add_account_data_for_user( - inviter_user_id, - AccountDataTypes.DIRECT, - {invitee_user_id: [room_id]}, - ) - ) - - return room_id - - _remote_invite_count: int = 0 - - def _create_remote_invite_room_for_user( - self, - invitee_user_id: str, - unsigned_invite_room_state: Optional[List[StrippedStateEvent]], - ) -> str: - """ - Create a fake invite for a remote room and persist it. - - We don't have any state for these kind of rooms and can only rely on the - stripped state included in the unsigned portion of the invite event to identify - the room. - - Args: - invitee_user_id: The person being invited - unsigned_invite_room_state: List of stripped state events to assist the - receiver in identifying the room. - - Returns: - The room ID of the remote invite room - """ - invite_room_id = f"!test_room{self._remote_invite_count}:remote_server" - - invite_event_dict = { - "room_id": invite_room_id, - "sender": "@inviter:remote_server", - "state_key": invitee_user_id, - "depth": 1, - "origin_server_ts": 1, - "type": EventTypes.Member, - "content": {"membership": Membership.INVITE}, - "auth_events": [], - "prev_events": [], - } - if unsigned_invite_room_state is not None: - serialized_stripped_state_events = [] - for stripped_event in unsigned_invite_room_state: - serialized_stripped_state_events.append( - { - "type": stripped_event.type, - "state_key": stripped_event.state_key, - "sender": stripped_event.sender, - "content": stripped_event.content, - } - ) - - invite_event_dict["unsigned"] = { - "invite_room_state": serialized_stripped_state_events - } - - invite_event = make_event_from_dict( - invite_event_dict, - room_version=RoomVersions.V10, - ) - invite_event.internal_metadata.outlier = True - invite_event.internal_metadata.out_of_band_membership = True - - self.get_success( - self.store.maybe_store_room_on_outlier_membership( - room_id=invite_room_id, room_version=invite_event.room_version - ) - ) - context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None - self.get_success(persist_controller.persist_event(invite_event, context)) - - self._remote_invite_count += 1 - - return invite_room_id - - def test_filter_dm_rooms(self) -> None: - """ - Test `filter.is_dm` for DM rooms - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a DM room - dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user 
should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_dm=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_dm=True, - ), - after_rooms_token, - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {dm_room_id}) - - # Try with `is_dm=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_dm=False, - ), - after_rooms_token, - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_rooms(self) -> None: - """ - Test `filter.is_encrypted` for encrypted rooms - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_server_left_room(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` against a room that everyone has left. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Leave the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - # Leave the room - self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_server_left_room2(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` against a room that everyone has - left. - - There is still someone local who is invited to the rooms but that doesn't affect - whether the server is participating in the room (users need to be joined). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - _user2_tok = self.login(user2_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Invite user2 - self.helper.invite(room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - # Invite user2 - self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_after_we_left(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` against a room that was encrypted - after we left the room (make sure we don't just use the current state) - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create an unencrypted room - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - # Leave the room - self.helper.join(room_id, user1_id, tok=user1_tok) - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create a room that will be encrypted - encrypted_after_we_left_room_id = self.helper.create_room_as( - user2_id, tok=user2_tok - ) - # Leave the room - self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) - self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) - - # Encrypt the room after we've left - self.helper.send_state( - encrypted_after_we_left_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user2_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears 
in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - # Even though we left the room before it was encrypted, we still see it because - # someone else on our server is still participating in the room and we "leak" - # the current state to the left user. But we consider the room encryption status - # to not be a secret given it's often set at the start of the room and it's one - # of the stripped state events that is normally handed out. - self.assertEqual( - truthy_filtered_room_map.keys(), {encrypted_after_we_left_room_id} - ) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - # Even though we left the room before it was encrypted... (see comment above) - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_with_remote_invite_room_no_stripped_state(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` filter against a remote invite - room without any `unsigned.invite_room_state` (stripped state). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room without any `unsigned.invite_room_state` - _remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, None - ) - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out whether - # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out whether - # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). 
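The rule these comments encode: with no stripped state at all, the invite's encryption status is unknowable, so the room is excluded from both `is_encrypted=True` and `is_encrypted=False` results, while stripped state that lacks an `m.room.encryption` event counts as unencrypted. A small sketch of that three-way decision over the serialized stripped-state dicts built by `_create_remote_invite_room_for_user` — the helper name `invite_looks_encrypted` is hypothetical:

    from typing import List, Optional

    def invite_looks_encrypted(
        invite_room_state: Optional[List[dict]],
    ) -> Optional[bool]:
        # None -> no `unsigned.invite_room_state` on the invite, so the
        # encryption status is unknowable and the room matches neither
        # is_encrypted=True nor is_encrypted=False.
        if invite_room_state is None:
            return None
        # Encrypted iff the stripped state carries an m.room.encryption event.
        return any(
            ev.get("type") == "m.room.encryption" and ev.get("state_key") == ""
            for ev in invite_room_state
        )

    assert invite_looks_encrypted(None) is None
    assert invite_looks_encrypted([{"type": "m.room.create", "state_key": ""}]) is False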
- self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_with_remote_invite_encrypted_room(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` filter against a remote invite - encrypted room with some `unsigned.invite_room_state` (stripped state). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` - # indicating that the room is encrypted. - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - }, - ), - StrippedStateEvent( - type=EventTypes.RoomEncryption, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", - }, - ), - ], - ) - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should appear here because it is encrypted - # according to the stripped state - self.assertEqual( - truthy_filtered_room_map.keys(), {encrypted_room_id, remote_invite_room_id} - ) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear here because it is encrypted - # according to the stripped state - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_encrypted_with_remote_invite_unencrypted_room(self) -> None: - """ - Test that we can apply a `filter.is_encrypted` filter against a remote invite - unencrypted room with some `unsigned.invite_room_state` (stripped state). - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` - # but don't set any room encryption event. 
- remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - }, - ), - # No room encryption event - ], - ) - - # Create an unencrypted room - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create an encrypted room - encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - self.helper.send_state( - encrypted_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user1_tok, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_encrypted=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=True, - ), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear here because it is unencrypted - # according to the stripped state - self.assertEqual(truthy_filtered_room_map.keys(), {encrypted_room_id}) - - # Try with `is_encrypted=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_encrypted=False, - ), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should appear because it is unencrypted according to - # the stripped state - self.assertEqual( - falsy_filtered_room_map.keys(), {room_id, remote_invite_room_id} - ) - - def test_filter_invite_rooms(self) -> None: - """ - Test `filter.is_invite` for rooms that the user has been invited to - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - user2_tok = self.login(user2_id, "pass") - - # Create a normal room - room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - - # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try with `is_invite=True` - truthy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_invite=True, - ), - after_rooms_token, - ) - ) - - self.assertEqual(truthy_filtered_room_map.keys(), {invite_room_id}) - - # Try with `is_invite=False` - falsy_filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - is_invite=False, - ), - after_rooms_token, - ) - ) - - self.assertEqual(falsy_filtered_room_map.keys(), {room_id}) - - def test_filter_room_types(self) -> None: - """ - 
Test `filter.room_types` for different room types - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - # Create an arbitrarily typed room - foo_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": { - EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" - } - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - # Try finding normal rooms and spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None, RoomTypes.SPACE] - ), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id, space_room_id}) - - # Try finding an arbitrary room type - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=["org.matrix.foobarbaz"] - ), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {foo_room_id}) - - def test_filter_not_room_types(self) -> None: - """ - Test `filter.not_room_types` for different room types - """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - # Create an arbitrarily typed room - foo_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": { - EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" - } - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding *NOT* normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(not_room_types=[None]), - after_rooms_token, - ) - ) - - 
self.assertEqual(filtered_room_map.keys(), {space_room_id, foo_room_id}) - - # Try finding *NOT* spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - not_room_types=[RoomTypes.SPACE] - ), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id, foo_room_id}) - - # Try finding *NOT* normal rooms or spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - not_room_types=[None, RoomTypes.SPACE] - ), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {foo_room_id}) - - # Test how it behaves when we have both `room_types` and `not_room_types`. - # `not_room_types` should win. - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None], not_room_types=[None] - ), - after_rooms_token, - ) - ) - - # Nothing matches because nothing is both a normal room and not a normal room - self.assertEqual(filtered_room_map.keys(), set()) - - # Test how it behaves when we have both `room_types` and `not_room_types`. - # `not_room_types` should win. - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters( - room_types=[None, RoomTypes.SPACE], not_room_types=[None] - ), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_server_left_room(self) -> None: - """ - Test that we can apply a `filter.room_types` against a room that everyone has left. 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Leave the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - # Leave the room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_server_left_room2(self) -> None: - """ - Test that we can apply a `filter.room_types` against a room that everyone has left. - - There is still someone local who is invited to the rooms but that doesn't affect - whether the server is participating in the room (users need to be joined). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - user2_id = self.register_user("user2", "pass") - _user2_tok = self.login(user2_id, "pass") - - before_rooms_token = self.event_sources.get_current_token() - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - # Invite user2 - self.helper.invite(room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(room_id, user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - # Invite user2 - self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok) - # User1 leaves the room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - # We're using a `from_token` so that the room is considered `newly_left` and - # appears in our list of relevant sync rooms - from_token=before_rooms_token, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - ) - ) - - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_with_remote_invite_room_no_stripped_state(self) -> None: - """ - Test that we can apply a `filter.room_types` filter against a remote invite - room without any `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room without any `unsigned.invite_room_state` - _remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, None - ) - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out what - # room type it is (no stripped state, `unsigned.invite_room_state`) - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear because we can't figure out what - # room type it is (no stripped state, `unsigned.invite_room_state`) - self.assertEqual(filtered_room_map.keys(), {space_room_id}) - - def test_filter_room_types_with_remote_invite_space(self) -> None: - """ - Test that we can apply a `filter.room_types` filter against a remote invite - to a space room with some `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` indicating - # that it is a space room - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - # Specify that it is a space room - EventContentFields.ROOM_TYPE: RoomTypes.SPACE, - }, - ), - ], - ) - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear here because it is a space room - # according to the stripped state - self.assertEqual(filtered_room_map.keys(), {room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should appear here because it is a space room - # according to the stripped state - self.assertEqual( - filtered_room_map.keys(), {space_room_id, remote_invite_room_id} - ) - - def test_filter_room_types_with_remote_invite_normal_room(self) -> None: - """ - Test that we can apply a `filter.room_types` filter against a remote invite - to a normal room with some `unsigned.invite_room_state` (stripped state). 
- """ - user1_id = self.register_user("user1", "pass") - user1_tok = self.login(user1_id, "pass") - - # Create a remote invite room with some `unsigned.invite_room_state` - # but the create event does not specify a room type (normal room) - remote_invite_room_id = self._create_remote_invite_room_for_user( - user1_id, - [ - StrippedStateEvent( - type=EventTypes.Create, - state_key="", - sender="@inviter:remote_server", - content={ - EventContentFields.ROOM_CREATOR: "@inviter:remote_server", - EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, - # No room type means this is a normal room - }, - ), - ], - ) - - # Create a normal room (no room type) - room_id = self.helper.create_room_as(user1_id, tok=user1_tok) - - # Create a space room - space_room_id = self.helper.create_room_as( - user1_id, - tok=user1_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - - after_rooms_token = self.event_sources.get_current_token() - - # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( - UserID.from_string(user1_id), - from_token=None, - to_token=after_rooms_token, - ) - - # Try finding only normal rooms - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[None]), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should appear here because it is a normal room - # according to the stripped state (no room type) - self.assertEqual(filtered_room_map.keys(), {room_id, remote_invite_room_id}) - - # Try finding only spaces - filtered_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms( - UserID.from_string(user1_id), - sync_room_map, - SlidingSyncConfig.SlidingSyncList.Filters(room_types=[RoomTypes.SPACE]), - after_rooms_token, - ) - ) - - # `remote_invite_room_id` should not appear here because it is a normal room - # according to the stripped state (no room type) - self.assertEqual(filtered_room_map.keys(), {space_room_id}) + self.assertTrue(room_id1 in newly_left) class SortRoomsTestCase(HomeserverTestCase): @@ -4361,25 +3615,26 @@ class SortRoomsTestCase(HomeserverTestCase): user: UserID, to_token: StreamToken, from_token: Optional[StreamToken], - ) -> Dict[str, _RoomMembershipForUser]: + ) -> Tuple[Dict[str, RoomsForUserType], AbstractSet[str], AbstractSet[str]]: """ Get the rooms the user should be syncing with """ - room_membership_for_user_map = self.get_success( - self.sliding_sync_handler.get_room_membership_for_user_at_to_token( + room_membership_for_user_map, newly_joined, newly_left = self.get_success( + self.sliding_sync_handler.room_lists.get_room_membership_for_user_at_to_token( user=user, from_token=from_token, to_token=to_token, ) ) filtered_sync_room_map = self.get_success( - self.sliding_sync_handler.filter_rooms_relevant_for_sync( + self.sliding_sync_handler.room_lists.filter_rooms_relevant_for_sync( user=user, room_membership_for_user_map=room_membership_for_user_map, + newly_left_room_ids=newly_left, ) ) - return filtered_sync_room_map + return filtered_sync_room_map, newly_joined, newly_left def test_sort_activity_basic(self) -> None: """ @@ -4400,7 +3655,7 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = 
self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -4408,7 +3663,7 @@ class SortRoomsTestCase(HomeserverTestCase): # Sort the rooms (what we're testing) sorted_sync_rooms = self.get_success( - self.sliding_sync_handler.sort_rooms( + self.sliding_sync_handler.room_lists.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, ) @@ -4481,7 +3736,7 @@ class SortRoomsTestCase(HomeserverTestCase): self.helper.send(room_id3, "activity in room3", tok=user2_tok) # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=before_rooms_token, to_token=after_rooms_token, @@ -4489,7 +3744,7 @@ class SortRoomsTestCase(HomeserverTestCase): # Sort the rooms (what we're testing) sorted_sync_rooms = self.get_success( - self.sliding_sync_handler.sort_rooms( + self.sliding_sync_handler.room_lists.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, ) @@ -4545,7 +3800,7 @@ class SortRoomsTestCase(HomeserverTestCase): after_rooms_token = self.event_sources.get_current_token() # Get the rooms the user should be syncing with - sync_room_map = self._get_sync_room_ids_for_user( + sync_room_map, newly_joined, newly_left = self._get_sync_room_ids_for_user( UserID.from_string(user1_id), from_token=None, to_token=after_rooms_token, @@ -4553,7 +3808,7 @@ class SortRoomsTestCase(HomeserverTestCase): # Sort the rooms (what we're testing) sorted_sync_rooms = self.get_success( - self.sliding_sync_handler.sort_rooms( + self.sliding_sync_handler.room_lists.sort_rooms( sync_room_map=sync_room_map, to_token=after_rooms_token, ) @@ -4565,3 +3820,1071 @@ class SortRoomsTestCase(HomeserverTestCase): # We only care about the *latest* event in the room. [room_id1, room_id2], ) + + +@attr.s(slots=True, auto_attribs=True, frozen=True) +class RequiredStateChangesTestParameters: + previous_required_state_map: Dict[str, Set[str]] + request_required_state_map: Dict[str, Set[str]] + state_deltas: StateMap[str] + expected_with_state_deltas: Tuple[ + Optional[Mapping[str, AbstractSet[str]]], StateFilter + ] + expected_without_state_deltas: Tuple[ + Optional[Mapping[str, AbstractSet[str]]], StateFilter + ] + + +class RequiredStateChangesTestCase(unittest.TestCase): + """Test cases for `_required_state_changes`""" + + @parameterized.expand( + [ + ( + "simple_no_change", + """Test no change to required state""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type1", "state_key"): "$event_id"}, + # No changes + expected_with_state_deltas=(None, StateFilter.none()), + expected_without_state_deltas=(None, StateFilter.none()), + ), + ), + ( + "simple_add_type", + """Test adding a type to the config""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. 
+ {"type1": {"state_key"}, "type2": {"state_key"}}, + # We should see the new type added + StateFilter.from_types([("type2", "state_key")]), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {"state_key"}}, + StateFilter.from_types([("type2", "state_key")]), + ), + ), + ), + ( + "simple_add_type_from_nothing", + """Test adding a type to the config when previously requesting nothing""", + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {"state_key"}, "type2": {"state_key"}}, + # We should see the new types added + StateFilter.from_types( + [("type1", "state_key"), ("type2", "state_key")] + ), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {"state_key"}}, + StateFilter.from_types( + [("type1", "state_key"), ("type2", "state_key")] + ), + ), + ), + ), + ( + "simple_add_state_key", + """Test adding a state key to the config""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1"}}, + request_required_state_map={"type": {"state_key1", "state_key2"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # We've added a key so we should persist the changed required state + # config. + {"type": {"state_key1", "state_key2"}}, + # We should see the new state_keys added + StateFilter.from_types([("type", "state_key2")]), + ), + expected_without_state_deltas=( + {"type": {"state_key1", "state_key2"}}, + StateFilter.from_types([("type", "state_key2")]), + ), + ), + ), + ( + "simple_retain_previous_state_keys", + """Test adding a state key to the config and retaining a previously sent state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1"}}, + request_required_state_map={"type": {"state_key2", "state_key3"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # We've added a key so we should persist the changed required state + # config. + # + # Retain `state_key1` from the `previous_required_state_map` + {"type": {"state_key1", "state_key2", "state_key3"}}, + # We should see the new state_keys added + StateFilter.from_types( + [("type", "state_key2"), ("type", "state_key3")] + ), + ), + expected_without_state_deltas=( + {"type": {"state_key1", "state_key2", "state_key3"}}, + StateFilter.from_types( + [("type", "state_key2"), ("type", "state_key3")] + ), + ), + ), + ), + ( + "simple_remove_type", + """ + Test removing a type from the config when there are a matching state + delta does cause the persisted required state config to change + + Test removing a type from the config when there are no matching state + deltas does *not* cause the persisted required state config to change + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type2` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type2`, we see that we haven't sent it before + # and send the new state. 
(we should still keep track that we've + # sent `type1` before). + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type2` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type2` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "simple_remove_type_to_nothing", + """ + Test removing a type from the config and no longer requesting any state + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + request_required_state_map={}, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type2` since there's been a change to that state + # (persist the change to required state). That way, next time + # they request `type2`, we see that we haven't sent it before + # and send the new state. (we should still keep track that we've + # sent `type1` before). + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type2` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type2` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "simple_remove_state_key", + """ + Test removing a state_key from the config + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1", "state_key2"}}, + request_required_state_map={"type": {"state_key1"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # Remove `(type, state_key2)` since there's been a change + # to that state (persist the change to required state). + # That way, next time they request `(type, state_key2)`, we see + # that we haven't sent it before and send the new state. (we + # should still keep track that we've sent `(type, state_key1)` + # before). + {"type": {"state_key1"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `(type, state_key2)` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `(type, state_key1)` and `(type, + # state_key2)` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "type_wildcards_add", + """ + Test that adding a wildcard type causes the persisted required state config + to change and we request everything. + + If an event type wildcard has been added or removed, we don't try to do + anything fancy, and instead always update the effective room required + state config to match the request.
+ """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key2"}}, + request_required_state_map={ + "type1": {"state_key2"}, + StateValues.WILDCARD: {"state_key"}, + }, + state_deltas={ + ("other_type", "state_key"): "$event_id", + }, + # We've added a wildcard, so we persist the change and request everything + expected_with_state_deltas=( + {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}}, + StateFilter.all(), + ), + expected_without_state_deltas=( + {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}}, + StateFilter.all(), + ), + ), + ), + ( + "type_wildcards_remove", + """ + Test removing a wildcard type causes the persisted required state config + to change and request nothing. + + If a event type wildcard has been added or removed we don't try and do + anything fancy, and instead always update the effective room required + state config to match the request. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key2"}, + StateValues.WILDCARD: {"state_key"}, + }, + request_required_state_map={"type1": {"state_key2"}}, + state_deltas={ + ("other_type", "state_key"): "$event_id", + }, + # We've removed a type wildcard, so we persist the change but don't request anything + expected_with_state_deltas=( + {"type1": {"state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + {"type1": {"state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_wildcards_add", + """Test adding a wildcard state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {StateValues.WILDCARD}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + # We've added a wildcard state_key, so we persist the change and + # request all of the state for that type + expected_with_state_deltas=( + {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}}, + StateFilter.from_types([("type2", None)]), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}}, + StateFilter.from_types([("type2", None)]), + ), + ), + ), + ( + "state_key_wildcards_remove", + """Test removing a wildcard state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {StateValues.WILDCARD}, + }, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type2", "state_key"): "$event_id"}, + # We've removed a state_key wildcard, so we persist the change and + # request nothing + expected_with_state_deltas=( + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + # We've removed a state_key wildcard but there have been no matching + # state changes, so no changes needed, just persist the + # `request_required_state_map` as-is. 
+ expected_without_state_deltas=( + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_remove_some", + """ + Test that removing state keys works when only some of the state keys have + changed + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={"type1": {"state_key1"}}, + state_deltas={("type1", "state_key3"): "$event_id"}, + expected_with_state_deltas=( + # We've removed some state keys from the type, but only state_key3 was + # changed so only that one should be removed. + {"type1": {"state_key1", "state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # No changes needed, just persist the + # `request_required_state_map` as-is + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_me_add", + """ + Test that adding state keys works when using "$ME" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={"type1": {StateValues.ME}}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {StateValues.ME}}, + # We should see the new state_keys added + StateFilter.from_types([("type1", "@user:test")]), + ), + expected_without_state_deltas=( + {"type1": {StateValues.ME}}, + StateFilter.from_types([("type1", "@user:test")]), + ), + ), + ), + ( + "state_key_me_remove", + """ + Test that removing state keys works when using "$ME" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {StateValues.ME}}, + request_required_state_map={}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type1` since there's been a change to that state + # (persist the change to required state). That way, next time + # they request `type1`, we see that we haven't sent it before + # and send the new state. (if we were tracking that we sent any + # other state, we should still keep track of that). + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type1` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type1` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_user_id_add", + """ + Test that adding state keys works when using your own user ID + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={"type1": {"@user:test"}}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config.
+ {"type1": {"@user:test"}}, + # We should see the new state_keys added + StateFilter.from_types([("type1", "@user:test")]), + ), + expected_without_state_deltas=( + {"type1": {"@user:test"}}, + StateFilter.from_types([("type1", "@user:test")]), + ), + ), + ), + ( + "state_key_me_remove", + """ + Test removing state keys work when using your own user ID + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"@user:test"}}, + request_required_state_map={}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type1` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type1`, we see that we haven't sent it before + # and send the new state. (if we were tracking that we sent any + # other state, we should still keep track that). + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type1` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type1` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_add", + """ + Test adding state keys work when using "$LAZY" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # If a "$LAZY" has been added or removed we always update the + # required state to what was requested for simplicity. + {EventTypes.Member: {StateValues.LAZY}}, + StateFilter.none(), + ), + expected_without_state_deltas=( + {EventTypes.Member: {StateValues.LAZY}}, + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_remove", + """ + Test removing state keys work when using "$LAZY" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + request_required_state_map={}, + state_deltas={(EventTypes.Member, "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # If a "$LAZY" has been added or removed we always update the + # required state to what was requested for simplicity. + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `EventTypes.Member` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_keep_previous_memberships_and_no_new_memberships", + """ + This test mimics a request with lazy-loading room members enabled where + we have previously sent down user2 and user3's membership events and now + we're sending down another response without any timeline events. 
+ """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove "@user2:test" since that state has changed and is no + # longer being requested anymore. Since something was removed, + # we should persist the changed to required state. That way next + # time, they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + StateValues.LAZY, + "@user3:test", + } + }, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # We're not requesting any specific `EventTypes.Member` now but + # since that state hasn't changed, nothing should change (we + # should still keep track that we've sent specific + # `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_keep_previous_memberships_with_new_memberships", + """ + This test mimics a request with lazy-loading room members enabled where + we have previously sent down user2 and user3's membership events and now + we're sending down another response with a new event from user4. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={ + EventTypes.Member: {StateValues.LAZY, "@user4:test"} + }, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + # + # Also remove "@user2:test" since that state has changed and is no + # longer being requested anymore. Since something was removed, + # we also should persist the changed to required state. That way next + # time, they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + StateValues.LAZY, + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + expected_without_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + { + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + ), + ), + ( + "state_key_expand_lazy_keep_previous_memberships", + """ + Test expanding the `required_state` to lazy-loading room members. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: {"@user2:test", "@user3:test"} + }, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Since `StateValues.LAZY` was added, we should persist the + # changed required state config. 
+ # + # Also remove "@user2:test" since that state has changed and is no + # longer being requested. Since something was removed, + # we should also persist the change to required state. That way, next + # time they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + StateValues.LAZY, + "@user3:test", + } + }, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # Since `StateValues.LAZY` was added, we should persist the + # changed required state config. + { + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_retract_lazy_keep_previous_memberships_no_new_memberships", + """ + Test retracting the `required_state` so it no longer lazy-loads room members. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `EventTypes.Member` since there's been a change to that + # state (persist the change to required state). That way, next + # time they request `EventTypes.Member`, we see that we haven't + # sent it before and send the new state. (if we were tracking + # that we sent any other state, we should still keep track of + # that). + # + # This acts the same as the `simple_remove_type` test. It's + # possible that we could remember the specific `state_keys` that + # we have sent down before but this currently just acts the same + # as if a whole `type` was removed. Perhaps it's good that we + # "garbage collect" and forget what we've sent before for a + # given `type` when the client stops caring about a certain + # `type`. + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `EventTypes.Member` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_retract_lazy_keep_previous_memberships_with_new_memberships", + """ + Test retracting the `required_state` so it no longer lazy-loads room members. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={EventTypes.Member: {"@user4:test"}}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + # + # Also remove "@user2:test" since that state has changed and is no + # longer being requested. Since something was removed, + # we should also persist the change to required state. That way, next + # time they request "@user2:test", we see that we haven't sent + # it before and send the new state.
(we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + expected_without_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + { + EventTypes.Member: { + "@user2:test", + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + ), + ), + ( + "type_wildcard_with_state_key_wildcard_to_explicit_state_keys", + """ + Test switching from a wildcard ("*", "*") to explicit state keys + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + StateValues.WILDCARD: {StateValues.WILDCARD} + }, + request_required_state_map={ + StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If we were previously fetching everything ("*", "*"), always update the effective + # room required state config to match the request. And since we were previously + # already fetching everything, we don't have to fetch anything now that they've + # narrowed. + expected_with_state_deltas=( + { + StateValues.WILDCARD: { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + expected_without_state_deltas=( + { + StateValues.WILDCARD: { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + ), + ), + ( + "type_wildcard_with_explicit_state_keys_to_wildcard_state_key", + """ + Test switching from explicit to wildcard state keys ("*", "*") + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={ + StateValues.WILDCARD: {StateValues.WILDCARD} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # We've added a wildcard, so we persist the change and request everything + expected_with_state_deltas=( + {StateValues.WILDCARD: {StateValues.WILDCARD}}, + StateFilter.all(), + ), + expected_without_state_deltas=( + {StateValues.WILDCARD: {StateValues.WILDCARD}}, + StateFilter.all(), + ), + ), + ), + ( + "state_key_wildcard_to_explicit_state_keys", + """Test switching from a wildcard to explicit state keys with a concrete type""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {StateValues.WILDCARD}}, + request_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If a state_key wildcard has been added or removed, we always + # update the effective room required state config to match the + # request. And since we were previously already fetching + # everything, we don't have to fetch anything now that they've + # narrowed.
+ expected_with_state_deltas=( + { + "type1": { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + expected_without_state_deltas=( + { + "type1": { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + ), + ), + ( + "explicit_state_keys_to_wildcard_state_key", + """Test switching from explicit state keys to a wildcard with a concrete type""", + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={"type1": {StateValues.WILDCARD}}, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If a state_key wildcard has been added or removed, we always + # update the effective room required state config to match the + # request. And we need to request all of the state for that type + # because we previously only sent down a few keys. + expected_with_state_deltas=( + {"type1": {StateValues.WILDCARD, "state_key2", "state_key3"}}, + StateFilter.from_types([("type1", None)]), + ), + expected_without_state_deltas=( + { + "type1": { + StateValues.WILDCARD, + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.from_types([("type1", None)]), + ), + ), + ), + ] + ) + def test_xxx( + self, + _test_label: str, + _test_description: str, + test_parameters: RequiredStateChangesTestParameters, + ) -> None: + # Without `state_deltas` + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + prev_required_state_map=test_parameters.previous_required_state_map, + request_required_state_map=test_parameters.request_required_state_map, + state_deltas={}, + ) + + self.assertEqual( + changed_required_state_map, + test_parameters.expected_without_state_deltas[0], + "changed_required_state_map does not match (without state_deltas)", + ) + self.assertEqual( + added_state_filter, + test_parameters.expected_without_state_deltas[1], + "added_state_filter does not match (without state_deltas)", + ) + + # With `state_deltas` + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + prev_required_state_map=test_parameters.previous_required_state_map, + request_required_state_map=test_parameters.request_required_state_map, + state_deltas=test_parameters.state_deltas, + ) + + self.assertEqual( + changed_required_state_map, + test_parameters.expected_with_state_deltas[0], + "changed_required_state_map does not match (with state_deltas)", + ) + self.assertEqual( + added_state_filter, + test_parameters.expected_with_state_deltas[1], + "added_state_filter does not match (with state_deltas)", + ) + + @parameterized.expand( + [ + # Test with a normal arbitrary type (no special meaning) + ("arbitrary_type", "type", set()), + # Test with membership + ("membership", EventTypes.Member, set()), + # Test with lazy-loading room members + ("lazy_loading_membership", EventTypes.Member, {StateValues.LAZY}), + ] + ) + def test_limit_retained_previous_state_keys( + self, + _test_label: str, + event_type: str, + extra_state_keys: Set[str], + ) -> None: + """ + Test that we limit the number of state_keys that we remember but always include + the state_keys that we've just requested. + """ + previous_required_state_map = { + event_type: { + # Prefix the state_keys we've "prev_"iously sent so they are easier to + # identify in our assertions.
+ f"prev_state_key{i}" + for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - 30) + } + | extra_state_keys + } + request_required_state_map = { + event_type: {f"state_key{i}" for i in range(50)} | extra_state_keys + } + + # (function under test) + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + prev_required_state_map=previous_required_state_map, + request_required_state_map=request_required_state_map, + state_deltas={}, + ) + assert changed_required_state_map is not None + + # We should only remember up to the maximum number of state keys + self.assertGreaterEqual( + len(changed_required_state_map[event_type]), + # Most of the time this will be `MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER` but + # because we are just naively selecting enough previous state_keys to fill + # the limit, there might be some overlap in what's added back which means we + # might have slightly less than the limit. + # + # `extra_state_keys` overlaps in the previous and requested + # `required_state_map` so we might see this this scenario. + MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - len(extra_state_keys), + ) + + # Should include all of the requested state + self.assertIncludes( + changed_required_state_map[event_type], + request_required_state_map[event_type], + ) + # And the rest is filled with the previous state keys + # + # We can't assert the exact state_keys since we don't know the order so we just + # check that they all start with "prev_" and that we have the correct amount. + remaining_state_keys = ( + changed_required_state_map[event_type] + - request_required_state_map[event_type] + ) + self.assertGreater( + len(remaining_state_keys), + 0, + ) + assert all( + state_key.startswith("prev_") for state_key in remaining_state_keys + ), "Remaining state_keys should be the previous state_keys" + + def test_request_more_state_keys_than_remember_limit(self) -> None: + """ + Test requesting more state_keys than fit in our limit to remember from previous + requests. + """ + previous_required_state_map = { + "type": { + # Prefix the state_keys we've "prev_"iously sent so they are easier to + # identify in our assertions. + f"prev_state_key{i}" + for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - 30) + } + } + request_required_state_map = { + "type": { + f"state_key{i}" + # Requesting more than the MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + 20) + } + } + # Ensure that we are requesting more than the limit + self.assertGreater( + len(request_required_state_map["type"]), + MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER, + ) + + # (function under test) + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + prev_required_state_map=previous_required_state_map, + request_required_state_map=request_required_state_map, + state_deltas={}, + ) + assert changed_required_state_map is not None + + # Should include all of the requested state + self.assertIncludes( + changed_required_state_map["type"], + request_required_state_map["type"], + exact=True, + ) diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index fa55f76916..6b202dfbd5 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py
@@ -17,10 +17,11 @@ # [This file includes modifications made by New Vector Limited] # # +from http import HTTPStatus from typing import Collection, ContextManager, List, Optional from unittest.mock import AsyncMock, Mock, patch -from parameterized import parameterized +from parameterized import parameterized, parameterized_class from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor @@ -32,7 +33,13 @@ from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_base import event_from_pdu_json -from synapse.handlers.sync import SyncConfig, SyncRequestKey, SyncResult, SyncVersion +from synapse.handlers.sync import ( + SyncConfig, + SyncRequestKey, + SyncResult, + SyncVersion, + TimelineBatch, +) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer @@ -58,9 +65,21 @@ def generate_request_key() -> SyncRequestKey: return ("request_key", _request_key) +@parameterized_class( + ("use_state_after",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'state_after' if params_dict['use_state_after'] else 'state'}", +) class SyncTestCase(tests.unittest.HomeserverTestCase): """Tests Sync Handler.""" + use_state_after: bool + servlets = [ admin.register_servlets, knock.register_servlets, @@ -79,7 +98,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): def test_wait_for_sync_for_user_auth_blocking(self) -> None: user_id1 = "@user1:test" user_id2 = "@user2:test" - sync_config = generate_sync_config(user_id1) + sync_config = generate_sync_config( + user_id1, use_state_after=self.use_state_after + ) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time @@ -112,7 +133,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): self.auth_blocking._hs_disabled = False - sync_config = generate_sync_config(user_id2) + sync_config = generate_sync_config( + user_id2, use_state_after=self.use_state_after + ) requester = create_requester(user_id2) e = self.get_failure( @@ -141,7 +164,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): initial_result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user, device_id="dev"), + sync_config=generate_sync_config( + user, device_id="dev", use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -175,7 +200,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user), + sync_config=generate_sync_config( + user, use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -188,7 +215,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user, device_id="dev"), + sync_config=generate_sync_config( + user, device_id="dev", use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=initial_result.next_batch, @@ -220,7 +249,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - 
sync_config=generate_sync_config(user), + sync_config=generate_sync_config( + user, use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -233,7 +264,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user, device_id="dev"), + sync_config=generate_sync_config( + user, device_id="dev", use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=initial_result.next_batch, @@ -276,7 +309,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): alice_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(owner), - generate_sync_config(owner), + generate_sync_config(owner, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -296,7 +329,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # Eve syncs. eve_requester = create_requester(eve) - eve_sync_config = generate_sync_config(eve) + eve_sync_config = generate_sync_config( + eve, use_state_after=self.use_state_after + ) eve_sync_after_ban: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, @@ -313,7 +348,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # the prev_events used when creating the join event, such that the ban does not # precede the join. with self._patch_get_latest_events([last_room_creation_event_id]): - self.helper.join(room_id, eve, tok=eve_token) + self.helper.join( + room_id, + eve, + tok=eve_token, + # Previously, this join would succeed but now we expect it to fail at + # this point. The rest of the test is for the case when this used to + # succeed. + expect_code=HTTPStatus.FORBIDDEN, + ) # Eve makes a second, incremental sync. eve_incremental_sync_after_join: SyncResult = self.get_success( @@ -367,7 +410,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -396,6 +439,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 2}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -442,7 +486,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -481,6 +525,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): } }, ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -518,6 +563,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): ... and a filter that means we only return 1 event, represented by the dashed horizontal lines: `S2` must be included in the `state` section on the second sync. + + When `use_state_after` is enabled, then we expect to see `s2` in the first sync. 
""" alice = self.register_user("alice", "password") alice_tok = self.login(alice, "password") @@ -528,7 +575,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -554,6 +601,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 1}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -567,10 +615,18 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): [e.event_id for e in room_sync.timeline.events], [e3_event], ) - self.assertEqual( - [e.event_id for e in room_sync.state.values()], - [], - ) + + if self.use_state_after: + # When using `state_after` we get told about s2 immediately + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [s2_event], + ) + else: + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [], + ) # Now send another event that points to S2, but not E3. with self._patch_get_latest_events([s2_event]): @@ -585,6 +641,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 1}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -598,10 +655,19 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): [e.event_id for e in room_sync.timeline.events], [e4_event], ) - self.assertEqual( - [e.event_id for e in room_sync.state.values()], - [s2_event], - ) + + if self.use_state_after: + # When using `state_after` we got told about s2 previously, so we + # don't again. + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [], + ) + else: + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [s2_event], + ) def test_state_includes_changes_on_ungappy_syncs(self) -> None: """Test `state` where the sync is not gappy. @@ -638,6 +704,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): This is the last chance for us to tell the client about S2, so it *must* be included in the response. + + When `use_state_after` is enabled, then we expect to see `s2` in the first sync. 
""" alice = self.register_user("alice", "password") alice_tok = self.login(alice, "password") @@ -648,7 +716,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -673,6 +741,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 1}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -684,7 +753,11 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): [e.event_id for e in room_sync.timeline.events], [e3_event], ) - self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()]) + if self.use_state_after: + # When using `state_after` we get told about s2 immediately + self.assertIn(s2_event, [e.event_id for e in room_sync.state.values()]) + else: + self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()]) # More events, E4 and E5 with self._patch_get_latest_events([e3_event]): @@ -695,7 +768,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): incremental_sync = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=initial_sync_result.next_batch, @@ -710,10 +783,19 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): [e.event_id for e in room_sync.timeline.events], [e4_event, e5_event], ) - self.assertEqual( - [e.event_id for e in room_sync.state.values()], - [s2_event], - ) + + if self.use_state_after: + # When using `state_after` we got told about s2 previously, so we + # don't again. 
+ self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [], + ) + else: + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [s2_event], + ) @parameterized.expand( [ @@ -721,7 +803,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): (True, False), (False, True), (True, True), - ] + ], + name_func=lambda func, num, p: f"{func.__name__}_{p.args[0]}_{p.args[1]}", ) def test_archived_rooms_do_not_include_state_after_leave( self, initial_sync: bool, empty_timeline: bool @@ -749,7 +832,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( bob_requester, - generate_sync_config(bob), + generate_sync_config(bob, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -780,7 +863,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): self.sync_handler.wait_for_sync_for_user( bob_requester, generate_sync_config( - bob, filter_collection=FilterCollection(self.hs, filter_dict) + bob, + filter_collection=FilterCollection(self.hs, filter_dict), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -791,7 +876,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): if empty_timeline: # The timeline should be empty self.assertEqual(sync_room_result.timeline.events, []) + else: + # The last three events in the timeline should be those leading up to the + # leave + self.assertEqual( + [e.event_id for e in sync_room_result.timeline.events[-3:]], + [before_message_event, before_state_event, leave_event], + ) + if empty_timeline or self.use_state_after: # And the state should include the leave event... self.assertEqual( sync_room_result.state[("m.room.member", bob)].event_id, leave_event @@ -801,12 +894,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): sync_room_result.state[("test_state", "")].event_id, before_state_event ) else: - # The last three events in the timeline should be those leading up to the - # leave - self.assertEqual( - [e.event_id for e in sync_room_result.timeline.events[-3:]], - [before_message_event, before_state_event, leave_event], - ) # ... 
And the state should be empty self.assertEqual(sync_room_result.state, {}) @@ -843,7 +930,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): ) -> List[EventBase]: return list(pdus) - self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] + self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( # type: ignore[method-assign] + _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] + ) prev_events = self.get_success(self.store.get_prev_events_for_room(room_id)) @@ -877,7 +966,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -926,7 +1015,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): private_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(user2), - generate_sync_config(user2), + generate_sync_config(user2, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -952,7 +1041,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -989,7 +1078,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): sync_d = defer.ensureDeferred( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=since_token, @@ -1044,7 +1133,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): sync_d = defer.ensureDeferred( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=since_token, @@ -1060,6 +1149,7 @@ def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id", filter_collection: Optional[FilterCollection] = None, + use_state_after: bool = False, ) -> SyncConfig: """Generate a sync config (with a unique request key). @@ -1067,7 +1157,8 @@ def generate_sync_config( user_id: user who is syncing. device_id: device that is syncing. Defaults to "device_id". filter_collection: filter to apply. Defaults to the default filter (ie, - return everything, with a default limit) + return everything, with a default limit) + use_state_after: whether the `use_state_after` flag was set. 
""" if filter_collection is None: filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION @@ -1077,4 +1168,138 @@ def generate_sync_config( filter_collection=filter_collection, is_guest=False, device_id=device_id, + use_state_after=use_state_after, ) + + +class SyncStateAfterTestCase(tests.unittest.HomeserverTestCase): + """Tests Sync Handler state behavior when using `use_state_after.""" + + servlets = [ + admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sync_handler = self.hs.get_sync_handler() + self.store = self.hs.get_datastores().main + + # AuthBlocking reads from the hs' config on initialization. We need to + # modify its config instead of the hs' + self.auth_blocking = self.hs.get_auth_blocking() + + def test_initial_sync_multiple_deltas(self) -> None: + """Test that if multiple state deltas have happened during processing of + a full state sync we return the correct state""" + + user = self.register_user("user", "password") + tok = self.login("user", "password") + + # Create a room as the user and set some custom state. + joined_room = self.helper.create_room_as(user, tok=tok) + + first_state = self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 1}, tok=tok + ) + + # Take a snapshot of the stream token, to simulate doing an initial sync + # at this point. + end_stream_token = self.hs.get_event_sources().get_current_token() + + # Send some state *after* the stream token + self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 2}, tok=tok + ) + + # Calculating the full state will return the first state, and not the + # second. + state = self.get_success( + self.sync_handler._compute_state_delta_for_full_sync( + room_id=joined_room, + sync_config=generate_sync_config(user, use_state_after=True), + batch=TimelineBatch( + prev_batch=end_stream_token, events=[], limited=True + ), + end_token=end_stream_token, + members_to_fetch=None, + timeline_state={}, + joined=True, + ) + ) + self.assertEqual(state[("m.test_event", "")], first_state["event_id"]) + + def test_incremental_sync_multiple_deltas(self) -> None: + """Test that if multiple state deltas have happened since an incremental + state sync we return the correct state""" + + user = self.register_user("user", "password") + tok = self.login("user", "password") + + # Create a room as the user and set some custom state. + joined_room = self.helper.create_room_as(user, tok=tok) + + # Take a snapshot of the stream token, to simulate doing an incremental sync + # from this point. + since_token = self.hs.get_event_sources().get_current_token() + + self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 1}, tok=tok + ) + + # Send some state *after* the stream token + second_state = self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 2}, tok=tok + ) + + end_stream_token = self.hs.get_event_sources().get_current_token() + + # Calculating the incrementals state will return the second state, and not the + # first. 
+ state = self.get_success( + self.sync_handler._compute_state_delta_for_incremental_sync( + room_id=joined_room, + sync_config=generate_sync_config(user, use_state_after=True), + batch=TimelineBatch( + prev_batch=end_stream_token, events=[], limited=True + ), + since_token=since_token, + end_token=end_stream_token, + members_to_fetch=None, + timeline_state={}, + ) + ) + self.assertEqual(state[("m.test_event", "")], second_state["event_id"]) + + def test_incremental_sync_lazy_loaded_no_timeline(self) -> None: + """Test that lazy-loading with an empty timeline doesn't return the full + state. + + There was a bug where an empty state filter would cause the DB to return + the full state, rather than an empty set. + """ + user = self.register_user("user", "password") + tok = self.login("user", "password") + + # Create a room as the user and set some custom state. + joined_room = self.helper.create_room_as(user, tok=tok) + + since_token = self.hs.get_event_sources().get_current_token() + end_stream_token = self.hs.get_event_sources().get_current_token() + + state = self.get_success( + self.sync_handler._compute_state_delta_for_incremental_sync( + room_id=joined_room, + sync_config=generate_sync_config(user, use_state_after=True), + batch=TimelineBatch( + prev_batch=end_stream_token, events=[], limited=True + ), + since_token=since_token, + end_token=end_stream_token, + members_to_fetch=set(), + timeline_state={}, + ) + ) + + self.assertEqual(state, {}) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 878d9683b6..b12ffc3665 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py
@@ -796,6 +796,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) + # Keep the old spam-checker tests (callbacks without `requester_id`) for backwards compatibility. async def allow_all(user_profile: UserProfile) -> bool: # Allow all users. return False @@ -809,6 +810,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) + # Keep the old spam-checker tests (callbacks without `requester_id`) for backwards compatibility. # Configure a spam checker that filters all users. async def block_all(user_profile: UserProfile) -> bool: # All users are spammy. return True @@ -820,6 +822,40 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 0) + async def allow_all_expects_requester_id( + user_profile: UserProfile, requester_id: str + ) -> bool: + self.assertEqual(requester_id, u1) + # Allow all users. + return False + + # Configure a spam checker that does not filter any users. + spam_checker = self.hs.get_module_api_callbacks().spam_checker + spam_checker._check_username_for_spam_callbacks = [ + allow_all_expects_requester_id + ] + + # The results do not change: + # We get one search result when searching for user2 by user1. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 1) + + # Configure a spam checker that filters all users. + async def block_all_expects_requester_id( + user_profile: UserProfile, requester_id: str + ) -> bool: + self.assertEqual(requester_id, u1) + # All users are spammy. + return True + + spam_checker._check_username_for_spam_callbacks = [ + block_all_expects_requester_id + ] + + # User1 now gets no search results for any of the other users. + s = self.get_success(self.handler.search_users(u1, "user2", 10)) + self.assertEqual(len(s["results"]), 0) + @override_config( { "spam_checker": { @@ -956,6 +992,67 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): [self.assertIn(user, local_users) for user in received_user_id_ordering[:3]] [self.assertIn(user, remote_users) for user in received_user_id_ordering[3:]] + @override_config( + { + "user_directory": { + "enabled": True, + "search_all_users": True, + "exclude_remote_users": True, + } + } + ) + def test_exclude_remote_users(self) -> None: + """Tests that only local users are returned when + user_directory.exclude_remote_users is True.
+ """ + + # Create a room and few users to test the directory with + searching_user = self.register_user("searcher", "password") + searching_user_tok = self.login("searcher", "password") + + room_id = self.helper.create_room_as( + searching_user, + room_version=RoomVersions.V1.identifier, + tok=searching_user_tok, + ) + + # Create a few local users and join them to the room + local_user_1 = self.register_user("user_xxxxx", "password") + local_user_2 = self.register_user("user_bbbbb", "password") + local_user_3 = self.register_user("user_zzzzz", "password") + + self._add_user_to_room(room_id, RoomVersions.V1, local_user_1) + self._add_user_to_room(room_id, RoomVersions.V1, local_user_2) + self._add_user_to_room(room_id, RoomVersions.V1, local_user_3) + + # Create a few "remote" users and join them to the room + remote_user_1 = "@user_aaaaa:remote_server" + remote_user_2 = "@user_yyyyy:remote_server" + remote_user_3 = "@user_ccccc:remote_server" + self._add_user_to_room(room_id, RoomVersions.V1, remote_user_1) + self._add_user_to_room(room_id, RoomVersions.V1, remote_user_2) + self._add_user_to_room(room_id, RoomVersions.V1, remote_user_3) + + local_users = [local_user_1, local_user_2, local_user_3] + remote_users = [remote_user_1, remote_user_2, remote_user_3] + + # The local searching user searches for the term "user", which other users have + # in their user id + results = self.get_success( + self.handler.search_users(searching_user, "user", 20) + )["results"] + received_user_ids = [result["user_id"] for result in results] + + for user in local_users: + self.assertIn( + user, received_user_ids, f"Local user {user} not found in results" + ) + + for user in remote_users: + self.assertNotIn( + user, received_user_ids, f"Remote user {user} should not be in results" + ) + def _add_user_to_room( self, room_id: str, @@ -1081,10 +1178,10 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): for use_numeric in [False, True]: if use_numeric: prefix1 = f"{i}" - prefix2 = f"{i+1}" + prefix2 = f"{i + 1}" else: prefix1 = f"a{i}" - prefix2 = f"a{i+1}" + prefix2 = f"a{i + 1}" local_user_1 = self.register_user(f"user{char}{prefix1}", "password") local_user_2 = self.register_user(f"user{char}{prefix2}", "password") diff --git a/tests/handlers/test_worker_lock.py b/tests/handlers/test_worker_lock.py
index 6e9a15c8ee..0691d3f99c 100644 --- a/tests/handlers/test_worker_lock.py +++ b/tests/handlers/test_worker_lock.py
@@ -19,6 +19,9 @@ # # +import logging +import platform + from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor @@ -29,6 +32,8 @@ from tests import unittest from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.utils import test_timeout +logger = logging.getLogger(__name__) + class WorkerLockTestCase(unittest.HomeserverTestCase): def prepare( @@ -53,12 +58,27 @@ class WorkerLockTestCase(unittest.HomeserverTestCase): def test_lock_contention(self) -> None: """Test lock contention when a lot of locks wait on a single worker""" - + nb_locks_to_test = 500 + current_machine = platform.machine().lower() + if current_machine.startswith("riscv"): + # RISC-V CI runners are slow, so use a longer timeout there. + timeout_seconds = 15 + logger.info( + f"Detected RISC-V architecture ({current_machine}). " + f"Adjusting test_lock_contention: timeout={timeout_seconds}s" + ) + else: + timeout_seconds = 5 # It takes around 0.5s on a 5+ year old laptop - with test_timeout(5): - nb_locks = 500 - d = self._take_locks(nb_locks) - self.assertEqual(self.get_success(d), nb_locks) + with test_timeout(timeout_seconds): + d = self._take_locks(nb_locks_to_test) + self.assertEqual(self.get_success(d), nb_locks_to_test) async def _take_locks(self, nb_locks: int) -> int: locks = [
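For contrast with the Synapse-specific helper above, here is a self-contained sketch (plain asyncio rather than Synapse's worker locks) of the same contention pattern: many sequential acquisitions of a single lock under an architecture-dependent deadline.

    import asyncio
    import platform


    async def take_locks(lock: asyncio.Lock, nb_locks: int) -> int:
        taken = 0
        for _ in range(nb_locks):
            async with lock:  # each acquisition waits for the previous holder
                taken += 1
        return taken


    async def main() -> None:
        lock = asyncio.Lock()
        # Slower architectures (e.g. RISC-V, often emulated in CI) get more time.
        timeout = 15 if platform.machine().lower().startswith("riscv") else 5
        assert await asyncio.wait_for(take_locks(lock, 500), timeout=timeout) == 500


    asyncio.run(main())

diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py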
index 8e8621e348..ffcbf4b3ca 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py
@@ -93,9 +93,7 @@ class SrvResolverTestCase(unittest.TestCase): resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) servers: List[Server] - servers = yield defer.ensureDeferred( - resolver.resolve_service(service_name) - ) # type: ignore[assignment] + servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] dns_client_mock.lookupService.assert_called_once_with(service_name) @@ -122,9 +120,7 @@ class SrvResolverTestCase(unittest.TestCase): ) servers: List[Server] - servers = yield defer.ensureDeferred( - resolver.resolve_service(service_name) - ) # type: ignore[assignment] + servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] self.assertFalse(dns_client_mock.lookupService.called) @@ -157,9 +153,7 @@ class SrvResolverTestCase(unittest.TestCase): resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) servers: List[Server] - servers = yield defer.ensureDeferred( - resolver.resolve_service(service_name) - ) # type: ignore[assignment] + servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) # type: ignore[assignment] self.assertEqual(len(servers), 0) self.assertEqual(len(cache), 0) diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py
index 731b0c4e59..dff5a5d262 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py
@@ -27,6 +27,7 @@ from typing import ( Callable, ContextManager, Dict, + Generator, List, Optional, Set, @@ -49,7 +50,10 @@ from synapse.http.server import ( respond_with_json, ) from synapse.http.site import SynapseRequest -from synapse.logging.context import LoggingContext, make_deferred_yieldable +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, +) from synapse.types import JsonDict from tests.server import FakeChannel, make_request @@ -199,7 +203,7 @@ def make_request_with_cancellation_test( # # We would like to trigger a cancellation at the first `await`, re-run the # request and cancel at the second `await`, and so on. By patching - # `Deferred.__next__`, we can intercept `await`s, track which ones we have or + # `Deferred.__await__`, we can intercept `await`s, track which ones we have or # have not seen, and force them to block when they wouldn't have. # The set of previously seen `await`s. @@ -211,7 +215,7 @@ def make_request_with_cancellation_test( ) for request_number in itertools.count(1): - deferred_patch = Deferred__next__Patch(seen_awaits, request_number) + deferred_patch = Deferred__await__Patch(seen_awaits, request_number) try: with mock.patch( @@ -250,6 +254,8 @@ def make_request_with_cancellation_test( ) if respond_mock.called: + _log_for_request(request_number, "--- response finished ---") + # The request ran to completion and we are done with testing it. # `respond_with_json` writes the response asynchronously, so we @@ -311,8 +317,8 @@ def make_request_with_cancellation_test( assert False, "unreachable" # noqa: B011 -class Deferred__next__Patch: - """A `Deferred.__next__` patch that will intercept `await`s and force them +class Deferred__await__Patch: + """A `Deferred.__await__` patch that will intercept `await`s and force them to block once it sees a new `await`. When done with the patch, `unblock_awaits()` must be called to clean up after any @@ -322,7 +328,7 @@ class Deferred__next__Patch: Usage: seen_awaits = set() - deferred_patch = Deferred__next__Patch(seen_awaits, 1) + deferred_patch = Deferred__await__Patch(seen_awaits, 1) try: with deferred_patch.patch(): # do things @@ -335,14 +341,14 @@ class Deferred__next__Patch: """ Args: seen_awaits: The set of stack traces of `await`s that have been previously - seen. When the `Deferred.__next__` patch sees a new `await`, it will add + seen. When the `Deferred.__await__` patch sees a new `await`, it will add it to the set. request_number: The request number to log against. """ self._request_number = request_number self._seen_awaits = seen_awaits - self._original_Deferred___next__ = Deferred.__next__ # type: ignore[misc,unused-ignore] + self._original_Deferred__await__ = Deferred.__await__ # type: ignore[misc,unused-ignore] # The number of `await`s on `Deferred`s we have seen so far. self.awaits_seen = 0 @@ -350,8 +356,13 @@ class Deferred__next__Patch: # Whether we have seen a new `await` not in `seen_awaits`. self.new_await_seen = False + # Whether to block new await points we see. This gets set to False once + # we have cancelled the request to allow things to run after + # cancellation. + self._block_new_awaits = True + # To force `await`s on resolved `Deferred`s to block, we make up a new - # unresolved `Deferred` and return it out of `Deferred.__next__` / + # unresolved `Deferred` and return it out of `Deferred.__await__` / # `coroutine.send()`. We have to resolve it later, in case the `await`ing # coroutine is part of some shared processing, such as `@cached`. 
self._to_unblock: Dict[Deferred, Union[object, Failure]] = {} @@ -360,15 +371,15 @@ self._previous_stack: List[inspect.FrameInfo] = [] def patch(self) -> ContextManager[Mock]: - """Returns a context manager which patches `Deferred.__next__`.""" + """Returns a context manager which patches `Deferred.__await__`.""" - def Deferred___next__( - deferred: "Deferred[T]", value: object = None - ) -> "Deferred[T]": - """Intercepts `await`s on `Deferred`s and rigs them to block once we have - seen enough of them. + def Deferred___await__( + deferred: "Deferred[T]", + ) -> Generator["Deferred[T]", None, T]: + """Intercepts calls to `__await__`, which returns a generator + yielding deferreds that we await on. - `Deferred.__next__` will normally: + The generator for `__await__` will normally: * return `self` if the `Deferred` is unresolved, in which case `coroutine.send()` will return the `Deferred`, and `_defer.inlineCallbacks` will stop running the coroutine until the @@ -376,9 +387,43 @@ * raise a `StopIteration(result)`, containing the result of the `await`. * raise another exception, which will come out of the `await`. """ + + # Get the original generator. + gen = self._original_Deferred__await__(deferred) + + # Run the generator, handling each iteration to see if we need to + # block. + try: + while True: + # We've hit a new await point (or the deferred has + # completed), handle it. + handle_next_iteration(deferred) + + # Continue on. + yield gen.send(None) + except StopIteration as e: + # We need to convert `StopIteration` into a normal return. + return e.value + + def handle_next_iteration( + deferred: "Deferred[T]", + ) -> None: + """Intercepts `await`s on `Deferred`s and rigs them to block once we have + seen enough of them. + + Args: + deferred: The deferred that we've captured and are intercepting + `await` calls within. + """ + if not self._block_new_awaits: + # We're no longer blocking await points + return + self.awaits_seen += 1 - stack = _get_stack(skip_frames=1) + stack = _get_stack( + skip_frames=2 # Ignore this function and `Deferred___await__` in stack trace + ) stack_hash = _hash_stack(stack) if stack_hash not in self._seen_awaits: @@ -389,20 +434,29 @@ if not self.new_await_seen: # This `await` isn't interesting. Let it proceed normally. + _log_await_stack( + stack, + self._previous_stack, + self._request_number, + "already seen", + ) + # Don't log the stack. It's been seen before in a previous run. self._previous_stack = stack - return self._original_Deferred___next__(deferred, value) + return # We want to block at the current `await`. if deferred.called and not deferred.paused: - # This `Deferred` already has a result. - # We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait - # on. This blocks the coroutine that did this `await`. + # This `Deferred` already has a result. We chain a new, + # unresolved, `Deferred` to the end of this Deferred that it + # will wait on. This blocks the coroutine that did this `await`. # We queue it up for unblocking later. new_deferred: "Deferred[T]" = Deferred() self._to_unblock[new_deferred] = deferred.result + deferred.addBoth(lambda _: make_deferred_yieldable(new_deferred)) + _log_await_stack( stack, self._previous_stack, @@ -411,7 +465,9 @@ ) self._previous_stack = stack - return make_deferred_yieldable(new_deferred) + # Continue iterating on the deferred now that we've blocked it + # again.
+ return # This `Deferred` does not have a result yet. # The `await` will block normally, so we don't have to do anything. @@ -423,9 +479,9 @@ ) self._previous_stack = stack - return self._original_Deferred___next__(deferred, value) + return - return mock.patch.object(Deferred, "__next__", new=Deferred___next__) + return mock.patch.object(Deferred, "__await__", new=Deferred___await__) def unblock_awaits(self) -> None: """Unblocks any shared processing that we forced to block. @@ -433,6 +489,9 @@ Must be called when done, otherwise processing shared between multiple requests, such as database queries started by `@cached`, will become permanently stuck. """ + # Also disable blocking at future await points + self._block_new_awaits = False + to_unblock = self._to_unblock self._to_unblock = {} for deferred, result in to_unblock.items():
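A standalone sketch of the interception idea used above: wrapping `Deferred.__await__` with a generator that counts how many `await`s it sees (illustrative only, not the test helper itself):

    from unittest import mock

    from twisted.internet.defer import Deferred, ensureDeferred

    original_await = Deferred.__await__
    awaits_seen = 0


    def counting_await(deferred):  # stands in for Deferred.__await__
        global awaits_seen
        awaits_seen += 1
        # Delegate to the real generator so the await behaves normally.
        return (yield from original_await(deferred))


    async def demo() -> int:
        d: "Deferred[int]" = Deferred()
        d.callback(42)
        return await d


    with mock.patch.object(Deferred, "__await__", counting_await):
        ensureDeferred(demo())

    assert awaits_seen == 1

diff --git a/tests/http/test_client.py b/tests/http/test_client.py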
index 721917f957..ac6470ebbd 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py
@@ -49,8 +49,11 @@ from tests.unittest import TestCase class ReadMultipartResponseTests(TestCase): - data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_" - data2 = b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" + multipart_response_data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_" + multipart_response_data2 = ( + b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" + ) + multipart_response_data_cased = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\ncOntEnt-type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-tyPe: text/plain\r\nconTent-dispOsition: inline; filename=test_upload\r\n\r\nfile_" redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: https://cdn.example.org/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" @@ -103,8 +106,31 @@ class ReadMultipartResponseTests(TestCase): result, deferred, protocol = self._build_multipart_response(249, 250) # Start sending data. - protocol.dataReceived(self.data1) - protocol.dataReceived(self.data2) + protocol.dataReceived(self.multipart_response_data1) + protocol.dataReceived(self.multipart_response_data2) + # Close the connection. + protocol.connectionLost(Failure(ResponseDone())) + + multipart_response: MultipartResponse = deferred.result # type: ignore[assignment] + + self.assertEqual(multipart_response.json, b"{}") + self.assertEqual(result.getvalue(), b"file_to_stream") + self.assertEqual(multipart_response.length, len(b"file_to_stream")) + self.assertEqual(multipart_response.content_type, b"text/plain") + self.assertEqual( + multipart_response.disposition, b"inline; filename=test_upload" + ) + + def test_parse_file_lowercase_headers(self) -> None: + """ + Check that a multipart response containing a file is properly parsed + into the json/file parts, and that both are still captured when the + HTTP headers are not in canonical case + """ + result, deferred, protocol = self._build_multipart_response(249, 250) + + # Start sending data. + protocol.dataReceived(self.multipart_response_data_cased) + protocol.dataReceived(self.multipart_response_data2) # Close the connection. protocol.connectionLost(Failure(ResponseDone())) @@ -143,7 +169,7 @@ class ReadMultipartResponseTests(TestCase): result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180) # Start sending data. - protocol.dataReceived(self.data1) + protocol.dataReceived(self.multipart_response_data1) self.assertEqual(result.getvalue(), b"file_") self._assert_error(deferred, protocol) @@ -154,11 +180,11 @@ class ReadMultipartResponseTests(TestCase): result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180) # Start sending data. - protocol.dataReceived(self.data1) + protocol.dataReceived(self.multipart_response_data1) self._assert_error(deferred, protocol) # More data might have come in.
- protocol.dataReceived(self.data2) + protocol.dataReceived(self.multipart_response_data2) self.assertEqual(result.getvalue(), b"file_") self._assert_error(deferred, protocol) @@ -172,7 +198,7 @@ class ReadMultipartResponseTests(TestCase): self.assertFalse(deferred.called) # Start sending data. - protocol.dataReceived(self.data1) + protocol.dataReceived(self.multipart_response_data1) self._assert_error(deferred, protocol) self._cleanup_error(deferred) @@ -181,7 +207,9 @@ class ReadBodyWithMaxSizeTests(TestCase): - def _build_response(self, length: Union[int, str] = UNKNOWN_LENGTH) -> Tuple[ + def _build_response( + self, length: Union[int, str] = UNKNOWN_LENGTH + ) -> Tuple[ BytesIO, "Deferred[int]", _DiscardBodyWithMaxSizeProtocol,
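For readability, the two chunks fed to the protocol in these tests assemble into the following multipart document (a JSON part followed by a file part, using the boundary from the test data):

    BOUNDARY = b"6067d4698f8d40a0a794ea7d7379d53a"

    # multipart_response_data1 + multipart_response_data2, spelled out:
    body = (
        b"\r\n\r\n--" + BOUNDARY + b"\r\n"
        b"Content-Type: application/json\r\n"
        b"\r\n"
        b"{}\r\n"
        b"--" + BOUNDARY + b"\r\n"
        b"Content-Type: text/plain\r\n"
        b"Content-Disposition: inline; filename=test_upload\r\n"
        b"\r\n"
        b"file_to_stream\r\n"
        b"--" + BOUNDARY + b"--\r\n\r\n"
    )

diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py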
index e2f033fdae..d5ebf10eac 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py
@@ -17,6 +17,7 @@ # [This file includes modifications made by New Vector Limited] # # +import io from typing import Any, Dict, Generator from unittest.mock import ANY, Mock, create_autospec @@ -32,7 +33,9 @@ from twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from synapse.api.errors import HttpResponseException, RequestSendFailed +from synapse.api.ratelimiting import Ratelimiter from synapse.config._base import ConfigError +from synapse.config.ratelimiting import RatelimitSettings from synapse.http.matrixfederationclient import ( ByteParser, MatrixFederationHttpClient, @@ -337,6 +340,81 @@ class FederationClientTests(HomeserverTestCase): r = self.successResultOf(d) self.assertEqual(r.code, 200) + def test_authed_media_redirect_response(self) -> None: + """ + Validate that, when following a `Location` redirect, the + maximum size is _not_ set to the initial response `Content-Length` and + the media file can be downloaded. + """ + limiter = Ratelimiter( + store=self.hs.get_datastores().main, + clock=self.clock, + cfg=RatelimitSettings(key="", per_second=0.17, burst_count=1048576), + ) + + output_stream = io.BytesIO() + + d = defer.ensureDeferred( + self.cl.federation_get_file( + "testserv:8008", "path", output_stream, limiter, "127.0.0.1", 10000 + ) + ) + + self.pump() + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + + # complete the connection and wire it up to a fake transport + protocol = factory.buildProtocol(None) + transport = StringTransport() + protocol.makeConnection(transport) + + # Deferred does not have a result + self.assertNoResult(d) + + redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: http://testserv:8008/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n" + protocol.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + b"Content-Length: %i\r\n" + b"Content-Type: multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a\r\n\r\n" + % (len(redirect_data)) + ) + protocol.dataReceived(redirect_data) + + # Still no result, not followed the redirect yet + self.assertNoResult(d) + + # Now send the response returned by the server at `Location` + clients = self.reactor.tcpClients + (host, port, factory, _timeout, _bindAddress) = clients[1] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + protocol = factory.buildProtocol(None) + transport = StringTransport() + protocol.makeConnection(transport) + + # make sure the length is longer than the initial response + data = b"Hello world!" 
* 30 + protocol.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + b"Content-Length: %i\r\n" + b"Content-Type: text/plain\r\n" + b"\r\n" + b"%s\r\n" + b"\r\n" % (len(data), data) + ) + + # We should get a successful response + length, _, _ = self.successResultOf(d) + self.assertEqual(length, len(data)) + self.assertEqual(output_stream.getvalue(), data) + @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"]) def test_timeout_reading_body(self, method_name: str) -> None: """ @@ -358,8 +436,7 @@ class FederationClientTests(HomeserverTestCase): # Send it the HTTP response client.dataReceived( - b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" - b"Server: Fake\r\n\r\n" + b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nServer: Fake\r\n\r\n" ) # Push by enough to time it out @@ -613,10 +690,7 @@ class FederationClientTests(HomeserverTestCase): # Send it a huge HTTP response protocol.dataReceived( - b"HTTP/1.1 200 OK\r\n" - b"Server: Fake\r\n" - b"Content-Type: application/json\r\n" - b"\r\n" + b"HTTP/1.1 200 OK\r\nServer: Fake\r\nContent-Type: application/json\r\n\r\n" ) self.pump() @@ -817,21 +891,30 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): ) # Fake `remoteserv:8008` responding to requests - mock_agent_on_federation_sender.request.side_effect = lambda *args, **kwargs: defer.succeed( - FakeResponse( - code=200, - body=b'{"foo": "bar"}', - headers=Headers( - { - "Content-Type": ["application/json"], - "Connection": ["close, X-Foo, X-Bar"], - # Should be removed because it's defined in the `Connection` header - "X-Foo": ["foo"], - "X-Bar": ["bar"], - # Should be removed because it's a hop-by-hop header - "Proxy-Authorization": "abcdef", - } - ), + mock_agent_on_federation_sender.request.side_effect = ( + lambda *args, **kwargs: defer.succeed( + FakeResponse( + code=200, + body=b'{"foo": "bar"}', + headers=Headers( + { + "Content-Type": ["application/json"], + "X-Test": ["test"], + # Define some hop-by-hop headers (try with varying casing to + # make sure we still match-up the headers) + "Connection": ["close, X-fOo, X-Bar, X-baz"], + # Should be removed because it's defined in the `Connection` header + "X-Foo": ["foo"], + "X-Bar": ["bar"], + # (not in canonical case) + "x-baZ": ["baz"], + # Should be removed because it's a hop-by-hop header + "Proxy-Authorization": "abcdef", + # Should be removed because it's a hop-by-hop header (not in canonical case) + "transfer-EnCoDiNg": "abcdef", + } + ), + ) ) ) @@ -858,9 +941,17 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase): header_names = set(headers.keys()) # Make sure the response does not include the hop-by-hop headers - self.assertNotIn(b"X-Foo", header_names) - self.assertNotIn(b"X-Bar", header_names) - self.assertNotIn(b"Proxy-Authorization", header_names) + self.assertIncludes( + header_names, + { + b"Content-Type", + b"X-Test", + # Default headers from Twisted + b"Date", + b"Server", + }, + exact=True, + ) # Make sure the response is as expected back on the main worker self.assertEqual(res, {"foo": "bar"}) diff --git a/tests/http/test_proxy.py b/tests/http/test_proxy.py
index 5895270494..7110dcf9f9 100644 --- a/tests/http/test_proxy.py +++ b/tests/http/test_proxy.py
@@ -22,27 +22,42 @@ from typing import Set from parameterized import parameterized -from synapse.http.proxy import parse_connection_header_value +from synapse.http.proxy import ( + HOP_BY_HOP_HEADERS_LOWERCASE, + parse_connection_header_value, +) from tests.unittest import TestCase +def mix_case(s: str) -> str: + """ + Mix up the case of each character in the string (upper or lower case) + """ + return "".join(c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s)) + + class ProxyTests(TestCase): @parameterized.expand( [ - [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}], # No whitespace - [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}], + [b"close, X-Foo, X-Bar", {"close", "x-foo", "x-bar"}], # No whitespace + [b"close,X-Foo,X-Bar", {"close", "x-foo", "x-bar"}], # More whitespace - [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}], + [b"close, X-Foo, X-Bar", {"close", "x-foo", "x-bar"}], # "close" directive not in the first position - [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}], + [b"X-Foo, X-Bar, close", {"x-foo", "x-bar", "close"}], # Normalizes header capitalization - [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}], + [b"keep-alive, x-fOo, x-bAr", {"keep-alive", "x-foo", "x-bar"}], # Handles header names with whitespace [ b"keep-alive, x foo, x bar", - {"Keep-Alive", "X foo", "X bar"}, + {"keep-alive", "x foo", "x bar"}, + ], + # Make sure we handle all of the hop-by-hop headers + [ + mix_case(", ".join(HOP_BY_HOP_HEADERS_LOWERCASE)).encode("ascii"), + HOP_BY_HOP_HEADERS_LOWERCASE, ], ] ) @@ -54,7 +69,8 @@ class ProxyTests(TestCase): """ Tests that the connection header value is parsed correctly """ - self.assertEqual( + self.assertIncludes( expected_extra_headers_to_remove, parse_connection_header_value(connection_header_value), + exact=True, )
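A rough behavioural model of what these expectations imply for `parse_connection_header_value` (an approximation for illustration, not the real implementation): split the header on commas, strip whitespace, and lowercase each token so hop-by-hop matching is case-insensitive.

    from typing import Set


    def parse_connection_header_value_model(value: bytes) -> Set[str]:
        """Approximates the parsing behaviour the tests above assert."""
        return {token.strip().lower() for token in value.decode("ascii").split(",")}


    assert parse_connection_header_value_model(b"close, X-Foo, X-Bar") == {
        "close",
        "x-foo",
        "x-bar",
    }

diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py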
index f71e4c2b8f..80b0856a56 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py
@@ -854,7 +854,7 @@ class MatrixFederationAgentTests(TestCase): def test_proxy_with_no_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) proxy_ep = checked_cast(HostnameEndpoint, http_proxy_agent.http_proxy_endpoint) - self.assertEqual(proxy_ep._hostStr, "proxy.com") + self.assertEqual(proxy_ep._hostText, "proxy.com") self.assertEqual(proxy_ep._port, 8888) @patch.dict(os.environ, {"http_proxy": "socks://proxy.com:8888"}) @@ -866,14 +866,14 @@ def test_proxy_with_http_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) proxy_ep = checked_cast(HostnameEndpoint, http_proxy_agent.http_proxy_endpoint) - self.assertEqual(proxy_ep._hostStr, "proxy.com") + self.assertEqual(proxy_ep._hostText, "proxy.com") self.assertEqual(proxy_ep._port, 8888) @patch.dict(os.environ, {"http_proxy": "https://proxy.com:8888"}) def test_proxy_with_https_scheme(self) -> None: https_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) proxy_ep = checked_cast(_WrapperEndpoint, https_proxy_agent.http_proxy_endpoint) - self.assertEqual(proxy_ep._wrappedEndpoint._hostStr, "proxy.com") + self.assertEqual(proxy_ep._wrappedEndpoint._hostText, "proxy.com") self.assertEqual(proxy_ep._wrappedEndpoint._port, 8888)
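These assertions track the rename of a private attribute on Twisted's `HostnameEndpoint` (`_hostStr` became `_hostText` in newer Twisted). A hedged compatibility sketch for code that must read the hostname under either name:

    from twisted.internet.endpoints import HostnameEndpoint


    def endpoint_host(endpoint: HostnameEndpoint) -> str:
        # Prefer the newer private attribute, falling back to the old one.
        # Both are implementation details, which is why the tests pin them.
        if hasattr(endpoint, "_hostText"):
            return endpoint._hostText
        return endpoint._hostStr

diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py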
index 18af2735fe..db39ecf244 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py
@@ -76,7 +76,7 @@ class TestServletUtils(unittest.TestCase): # Invalid UTF-8. with self.assertRaises(SynapseError): - parse_json_value_from_request(make_request(b"\xFF\x00")) + parse_json_value_from_request(make_request(b"\xff\x00")) # Invalid JSON. with self.assertRaises(SynapseError): diff --git a/tests/http/test_site.py b/tests/http/test_site.py
index bfa26a329c..fc620c705a 100644 --- a/tests/http/test_site.py +++ b/tests/http/test_site.py
@@ -90,3 +90,56 @@ class SynapseRequestTestCase(HomeserverTestCase): # default max upload size is 50M, so it should drop on the next buffer after # that. self.assertEqual(sent, 50 * 1024 * 1024 + 1024) + + def test_content_type_multipart(self) -> None: + """HTTP POST requests with `content-type: multipart/form-data` should be rejected""" + self.hs.start_listening() + + # find the HTTP server which is configured to listen on port 0 + (port, factory, _backlog, interface) = self.reactor.tcpServers[0] + self.assertEqual(interface, "::") + self.assertEqual(port, 0) + + # as a control case, first send a regular request. + + # complete the connection and wire it up to a fake transport + client_address = IPv6Address("TCP", "::1", 2345) + protocol = factory.buildProtocol(client_address) + transport = StringTransport() + protocol.makeConnection(transport) + + protocol.dataReceived( + b"POST / HTTP/1.1\r\n" + b"Connection: close\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + b"0\r\n" + b"\r\n" + ) + + while not transport.disconnecting: + self.reactor.advance(1) + + # we should get a 404 + self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 404 ") + + # now send request with content-type header + protocol = factory.buildProtocol(client_address) + transport = StringTransport() + protocol.makeConnection(transport) + + protocol.dataReceived( + b"POST / HTTP/1.1\r\n" + b"Connection: close\r\n" + b"Transfer-Encoding: chunked\r\n" + b"Content-Type: multipart/form-data\r\n" + b"\r\n" + b"0\r\n" + b"\r\n" + ) + + while not transport.disconnecting: + self.reactor.advance(1) + + # we should get a 415 + self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 415 ") diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
index ff85e067b7..33b94cf9fa 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py
@@ -164,7 +164,6 @@ class TerseJsonTestCase(LoggerCleanupMixin, TestCase): site.site_tag = "test-site" site.server_version_string = "Server v1" site.reactor = Mock() - site.experimental_cors_msc3886 = False request = SynapseRequest( cast(HTTPChannel, FakeChannel(site, self.reactor)), site ) diff --git a/tests/media/test_media_retention.py b/tests/media/test_media_retention.py
index 417d17ebd2..d8f4f57c8c 100644 --- a/tests/media/test_media_retention.py +++ b/tests/media/test_media_retention.py
@@ -31,6 +31,9 @@ from synapse.rest.client import login, register, room from synapse.server import HomeServer from synapse.types import UserID from synapse.util import Clock +from synapse.util.stringutils import ( + random_string, +) from tests import unittest from tests.unittest import override_config @@ -65,7 +68,6 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): # quarantined media) into both the local store and the remote cache, plus # one additional local media that is marked as protected from quarantine. media_repository = hs.get_media_repository() - test_media_content = b"example string" def _create_media_and_set_attributes( last_accessed_ms: Optional[int], @@ -73,12 +75,14 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): is_protected: Optional[bool] = False, ) -> MXCUri: # "Upload" some media to the local media store + # Use random content so that each media item gets a distinct sha256 hash + random_content = bytes(random_string(24), "utf-8") mxc_uri: MXCUri = self.get_success( media_repository.create_content( media_type="text/plain", upload_name=None, - content=io.BytesIO(test_media_content), - content_length=len(test_media_content), + content=io.BytesIO(random_content), + content_length=len(random_content), auth_user=UserID.from_string(test_user_id), ) ) @@ -129,6 +133,7 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): time_now_ms=clock.time_msec(), upload_name="testfile.txt", filesystem_id="abcdefg12345", + sha256=random_string(24), ) )
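The switch from a shared `test_media_content` to random per-upload content matters because media rows now record a sha256 of the content (note the new `sha256=` argument above): identical payloads would otherwise collide on that hash. A small sketch of the property being relied on:

    import hashlib

    from synapse.util.stringutils import random_string  # also imported above

    content_a = random_string(24).encode("utf-8")
    content_b = random_string(24).encode("utf-8")

    # Two random payloads get distinct hashes (with overwhelming probability),
    # so each test upload counts as a distinct media item.
    assert hashlib.sha256(content_a).hexdigest() != hashlib.sha256(content_b).hexdigest()

diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py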
index e55001fb40..2f7cf4569b 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py
@@ -23,14 +23,13 @@ import shutil import tempfile from binascii import unhexlify from io import BytesIO -from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Tuple, Union +from typing import Any, BinaryIO, ClassVar, Dict, List, Literal, Optional, Tuple, Union from unittest.mock import MagicMock, Mock, patch from urllib import parse import attr from parameterized import parameterized, parameterized_class from PIL import Image as Image -from typing_extensions import Literal from twisted.internet import defer from twisted.internet.defer import Deferred @@ -43,6 +42,7 @@ from twisted.web.resource import Resource from synapse.api.errors import Codes, HttpResponseException from synapse.api.ratelimiting import Ratelimiter from synapse.events import EventBase +from synapse.http.client import ByteWriteable from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable from synapse.media._base import FileInfo, ThumbnailInfo @@ -60,7 +60,7 @@ from synapse.util import Clock from tests import unittest from tests.server import FakeChannel -from tests.test_utils import SMALL_PNG +from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG, SMALL_PNG_SHA256 from tests.unittest import override_config from tests.utils import default_config @@ -187,10 +187,70 @@ small_png_with_transparency = TestImage( # different versions of Pillow. ) -small_lossless_webp = TestImage( +small_cmyk_jpeg = TestImage( + SMALL_CMYK_JPEG, + b"image/jpeg", + b".jpeg", + # These values were sourced simply by seeing what the tests produced at + # the time of writing. If this changes, the tests will fail. + unhexlify( + b"ffd8ffe000104a46494600010100000100010000ffdb00430006" + b"040506050406060506070706080a100a0a09090a140e0f0c1017" + b"141818171416161a1d251f1a1b231c1616202c20232627292a29" + b"191f2d302d283025282928ffdb0043010707070a080a130a0a13" + b"281a161a28282828282828282828282828282828282828282828" + b"2828282828282828282828282828282828282828282828282828" + b"2828ffc00011080020002003012200021101031101ffc4001f00" + b"0001050101010101010000000000000000010203040506070809" + b"0a0bffc400b5100002010303020403050504040000017d010203" + b"00041105122131410613516107227114328191a1082342b1c115" + b"52d1f02433627282090a161718191a25262728292a3435363738" + b"393a434445464748494a535455565758595a636465666768696a" + b"737475767778797a838485868788898a92939495969798999aa2" + b"a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9bac2c3c4c5c6c7c8c9ca" + b"d2d3d4d5d6d7d8d9dae1e2e3e4e5e6e7e8e9eaf1f2f3f4f5f6f7" + b"f8f9faffc4001f01000301010101010101010100000000000001" + b"02030405060708090a0bffc400b5110002010204040304070504" + b"0400010277000102031104052131061241510761711322328108" + b"144291a1b1c109233352f0156272d10a162434e125f11718191a" + b"262728292a35363738393a434445464748494a53545556575859" + b"5a636465666768696a737475767778797a82838485868788898a" + b"92939495969798999aa2a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9" + b"bac2c3c4c5c6c7c8c9cad2d3d4d5d6d7d8d9dae2e3e4e5e6e7e8" + b"e9eaf2f3f4f5f6f7f8f9faffda000c03010002110311003f00fa" + b"a68a28a0028a28a0028a28a0028a28a00fffd9" + ), unhexlify( - b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700" + b"ffd8ffe000104a46494600010100000100010000ffdb00430006" + b"040506050406060506070706080a100a0a09090a140e0f0c1017" + b"141818171416161a1d251f1a1b231c1616202c20232627292a29" + b"191f2d302d283025282928ffdb0043010707070a080a130a0a13" + b"281a161a28282828282828282828282828282828282828282828" + b"2828282828282828282828282828282828282828282828282828" +
b"2828ffc00011080001000103012200021101031101ffc4001f00" + b"0001050101010101010000000000000000010203040506070809" + b"0a0bffc400b5100002010303020403050504040000017d010203" + b"00041105122131410613516107227114328191a1082342b1c115" + b"52d1f02433627282090a161718191a25262728292a3435363738" + b"393a434445464748494a535455565758595a636465666768696a" + b"737475767778797a838485868788898a92939495969798999aa2" + b"a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9bac2c3c4c5c6c7c8c9ca" + b"d2d3d4d5d6d7d8d9dae1e2e3e4e5e6e7e8e9eaf1f2f3f4f5f6f7" + b"f8f9faffc4001f01000301010101010101010100000000000001" + b"02030405060708090a0bffc400b5110002010204040304070504" + b"0400010277000102031104052131061241510761711322328108" + b"144291a1b1c109233352f0156272d10a162434e125f11718191a" + b"262728292a35363738393a434445464748494a53545556575859" + b"5a636465666768696a737475767778797a82838485868788898a" + b"92939495969798999aa2a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9" + b"bac2c3c4c5c6c7c8c9cad2d3d4d5d6d7d8d9dae2e3e4e5e6e7e8" + b"e9eaf2f3f4f5f6f7f8f9faffda000c03010002110311003f00fa" + b"a68a28a00fffd9" ), +) + +small_lossless_webp = TestImage( + unhexlify(b"524946461a000000574542505650384c0d0000002f00000010071011118888fe0700"), b"image/webp", b".webp", ) @@ -261,7 +321,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): """A mock for MatrixFederationHttpClient.get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]] + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]], ) -> Tuple[int, Dict[bytes, List[bytes]]]: data, response = r output_stream.write(data) @@ -357,6 +417,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): return channel + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_handle_missing_content_type(self) -> None: channel = self._req( b"attachment; filename=out" + self.test_image.extension, @@ -368,6 +433,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"] ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_disposition_filename_ascii(self) -> None: """ If the filename is filename=<ascii> then Synapse will decode it as an @@ -388,6 +458,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): ], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_disposition_filenamestar_utf8escaped(self) -> None: """ If the filename is filename=*utf8''<utf8 escaped> then Synapse will @@ -413,6 +488,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): ], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_disposition_none(self) -> None: """ If there is no filename, Content-Disposition should only @@ -429,6 +509,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): [b"inline" if self.test_image.is_inline else b"attachment"], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" self._test_thumbnail( @@ -438,6 +523,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_thumbnail_scale(self) -> None: """Test that a scaled remote thumbnail is available.""" self._test_thumbnail( @@ -447,6 +537,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) + 
@unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_invalid_type(self) -> None: """An invalid thumbnail type is never available.""" self._test_thumbnail( @@ -457,7 +552,10 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) @unittest.override_config( - {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} + { + "thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}], + "enable_authenticated_media": False, + }, ) def test_no_thumbnail_crop(self) -> None: """ @@ -471,7 +569,10 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) @unittest.override_config( - {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} + { + "thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}], + "enable_authenticated_media": False, + } ) def test_no_thumbnail_scale(self) -> None: """ @@ -484,6 +585,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_thumbnail_repeated_thumbnail(self) -> None: """Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. @@ -658,6 +764,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_x_robots_tag_header(self) -> None: """ Tests that the `X-Robots-Tag` header is present, which informs web crawlers @@ -671,6 +782,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): [b"noindex, nofollow, noarchive, noimageindex"], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_cross_origin_resource_policy_header(self) -> None: """ Test that the Cross-Origin-Resource-Policy header is set to "cross-origin" @@ -685,6 +801,11 @@ class MediaRepoTests(unittest.HomeserverTestCase): [b"cross-origin"], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_unknown_v3_endpoint(self) -> None: """ If the v3 endpoint fails, try the r0 one. 
@@ -923,6 +1044,11 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): d.callback(52428800) return d + @override_config( + { + "enable_authenticated_media": False, + } + ) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, @@ -998,6 +1124,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): { "remote_media_download_per_second": "50M", "remote_media_download_burst_count": "50M", + "enable_authenticated_media": False, } ) @patch( @@ -1057,7 +1184,12 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel.code == 200 - @override_config({"remote_media_download_burst_count": "87M"}) + @override_config( + { + "remote_media_download_burst_count": "87M", + "enable_authenticated_media": False, + } + ) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, @@ -1097,7 +1229,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel2.code == 429 - @override_config({"max_upload_size": "29M"}) + @override_config({"max_upload_size": "29M", "enable_authenticated_media": False}) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, @@ -1124,3 +1256,146 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): ) assert channel.code == 502 assert channel.json_body["errcode"] == "M_TOO_LARGE" + + +def read_body( + response: IResponse, stream: ByteWriteable, max_size: Optional[int] +) -> Deferred: + d: Deferred = defer.Deferred() + stream.write(SMALL_PNG) + d.callback(len(SMALL_PNG)) + return d + + +class MediaHashesTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + media.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user = self.register_user("user", "pass") + self.tok = self.login("user", "pass") + self.store = hs.get_datastores().main + self.client = hs.get_federation_http_client() + + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + + def test_ensure_correct_sha256(self) -> None: + """Check that the hash does not change""" + media = self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) + mxc = media.get("content_uri") + assert mxc + store_media = self.get_success(self.store.get_local_media(mxc[11:])) + assert store_media + self.assertEqual( + store_media.sha256, + SMALL_PNG_SHA256, + ) + + def test_ensure_multiple_correct_sha256(self) -> None: + """Check that two media items have the same hash.""" + media_a = self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) + mxc_a = media_a.get("content_uri") + assert mxc_a + store_media_a = self.get_success(self.store.get_local_media(mxc_a[11:])) + assert store_media_a + + media_b = self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) + mxc_b = media_b.get("content_uri") + assert mxc_b + store_media_b = self.get_success(self.store.get_local_media(mxc_b[11:])) + assert store_media_b + + self.assertNotEqual( + store_media_a.media_id, + store_media_b.media_id, + ) + self.assertEqual( + store_media_a.sha256, + store_media_b.sha256, + ) + + @override_config( + { + "enable_authenticated_media": False, + } + ) + # mock actually reading file body + @patch( + "synapse.http.matrixfederationclient.read_body_with_max_size", + 
read_body, + ) + def test_ensure_correct_sha256_federated(self) -> None: + """Check that federated media have the same hash.""" + + # Mock getting a file over federation + async def _send_request(*args: Any, **kwargs: Any) -> IResponse: + resp = MagicMock(spec=IResponse) + resp.code = 200 + resp.length = 500 + resp.headers = Headers({"Content-Type": ["application/octet-stream"]}) + resp.phrase = b"OK" + return resp + + self.client._send_request = _send_request # type: ignore + + # first request should go through + channel = self.make_request( + "GET", + "/_matrix/media/v3/download/remote.org/abc", + shorthand=False, + access_token=self.tok, + ) + assert channel.code == 200 + store_media = self.get_success( + self.store.get_cached_remote_media("remote.org", "abc") + ) + assert store_media + self.assertEqual( + store_media.sha256, + SMALL_PNG_SHA256, + ) + + +class MediaRepoSizeModuleCallbackTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + admin.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user = self.register_user("user", "pass") + self.tok = self.login("user", "pass") + self.mock_result = True  # Allow all uploads by default + + hs.get_module_api().register_media_repository_callbacks( + is_user_allowed_to_upload_media_of_size=self.is_user_allowed_to_upload_media_of_size, + ) + + def create_resource_dict(self) -> Dict[str, Resource]: + resources = super().create_resource_dict() + resources["/_matrix/media"] = self.hs.get_media_repository_resource() + return resources + + async def is_user_allowed_to_upload_media_of_size( + self, user_id: str, size: int + ) -> bool: + self.last_user_id = user_id + self.last_size = size + return self.mock_result + + def test_upload_allowed(self) -> None: + self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=200) + assert self.last_user_id == self.user + assert self.last_size == len(SMALL_PNG) + + def test_upload_not_allowed(self) -> None: + self.mock_result = False + self.helper.upload_media(SMALL_PNG, tok=self.tok, expect_code=413) + assert self.last_user_id == self.user + assert self.last_size == len(SMALL_PNG)
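The callback exercised by `MediaRepoSizeModuleCallbackTestCase` above can equally be supplied by a module; a minimal sketch (the config key and 10 MiB default are illustrative, not part of this diff):

    from synapse.module_api import ModuleApi


    class MediaSizeLimiter:
        """Illustrative module: reject uploads larger than a configured size."""

        def __init__(self, config: dict, api: ModuleApi):
            self._max_size = config.get("max_upload_size", 10 * 1024 * 1024)
            api.register_media_repository_callbacks(
                is_user_allowed_to_upload_media_of_size=self.is_user_allowed_to_upload_media_of_size,
            )

        async def is_user_allowed_to_upload_media_of_size(
            self, user_id: str, size: int
        ) -> bool:
            # Returning False rejects the upload with HTTP 413, as asserted in
            # test_upload_not_allowed above.
            return size <= self._max_size

diff --git a/tests/media/test_oembed.py b/tests/media/test_oembed.py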
index 29d4580697..b8265ff9ca 100644 --- a/tests/media/test_oembed.py +++ b/tests/media/test_oembed.py
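The two hunks below predeclare loop variables as `Any`. A minimal standalone illustration of why (a sketch, not from the test file): without the annotation, a type checker infers the loop variable's type from the heterogeneous tuple and then objects to reusing the value as loose JSON input.

```python
from typing import Any

version: Any  # predeclared so the same name may bind a str, a float, or an int
for version in ("1.0", 1.0, 1):
    payload = {"version": version}  # accepted wherever loose JSON values are expected
```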
@@ -20,6 +20,7 @@ # import json +from typing import Any from parameterized import parameterized @@ -52,6 +53,7 @@ class OEmbedTests(HomeserverTestCase): def test_version(self) -> None: """Accept versions that are similar to 1.0 as a string or int (or missing).""" + version: Any for version in ("1.0", 1.0, 1): result = self.parse_response({"version": version}) # An empty Open Graph response is an error, ensure the URL is included. @@ -69,6 +71,7 @@ class OEmbedTests(HomeserverTestCase): def test_cache_age(self) -> None: """Ensure a cache-age is parsed properly.""" + cache_age: Any # Correct-ish cache ages are allowed. for cache_age in ("1", 1.0, 1): result = self.parse_response({"cache_age": cache_age}) diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py
index 80f24814e8..2e7004df3a 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py
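The import swap below works because `Protocol` has been in the standard `typing` module since Python 3.8, so the `typing_extensions` backport is no longer needed. A minimal sketch of equivalent usage:

```python
from typing import Protocol  # stdlib since Python 3.8; no typing_extensions required

class SupportsCollect(Protocol):
    # Structural type: any object with a compatible collect() conforms.
    def collect(self) -> list: ...
```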
@@ -19,12 +19,11 @@ # # from importlib import metadata -from typing import Dict, Tuple +from typing import Dict, Protocol, Tuple from unittest.mock import patch from pkg_resources import parse_version from prometheus_client.core import Sample -from typing_extensions import Protocol from synapse.app._base import _set_prometheus_client_use_created_metrics from synapse.metrics import REGISTRY, InFlightGauge, generate_latest diff --git a/tests/metrics/test_phone_home_stats.py b/tests/metrics/test_phone_home_stats.py new file mode 100644
index 0000000000..5339d649df --- /dev/null +++ b/tests/metrics/test_phone_home_stats.py
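For orientation before the new file: the test stubs out the homeserver's outbound HTTP client, advances the fake reactor past the reporting interval, and inspects the JSON that would have been PUT to the stats endpoint. A condensed sketch of that mechanism, using names from the file below and assuming the usual `HomeserverTestCase` fixtures:

```python
# Condensed sketch of the pattern, not the test itself; see the full file below.
from unittest.mock import AsyncMock

from synapse.app.phone_stats_home import (
    PHONE_HOME_INTERVAL_SECONDS,
    start_phone_stats_home,
)

def capture_phone_home_stats(hs, reactor):
    put_json_mock = AsyncMock(return_value={})
    hs.get_proxied_http_client().put_json = put_json_mock  # intercept the upload
    start_phone_stats_home(hs=hs)                          # schedule reporting
    reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50)  # let it fire at least once
    endpoint, stats = put_json_mock.call_args.args         # (url, report body)
    return stats
```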
@@ -0,0 +1,263 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+import logging
+from unittest.mock import AsyncMock
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.app.phone_stats_home import (
+    PHONE_HOME_INTERVAL_SECONDS,
+    start_phone_stats_home,
+)
+from synapse.rest import admin, login, register, room
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests import unittest
+from tests.server import ThreadedMemoryReactorClock
+
+TEST_REPORT_STATS_ENDPOINT = "https://fake.endpoint/stats"
+TEST_SERVER_CONTEXT = "test-server-context"
+
+
+class PhoneHomeStatsTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        room.register_servlets,
+        register.register_servlets,
+        login.register_servlets,
+    ]
+
+    def make_homeserver(
+        self, reactor: ThreadedMemoryReactorClock, clock: Clock
+    ) -> HomeServer:
+        # Configure the homeserver to enable stats reporting.
+        config = self.default_config()
+        config["report_stats"] = True
+        config["report_stats_endpoint"] = TEST_REPORT_STATS_ENDPOINT
+
+        # Configure the server context so we can check it ends up being reported
+        config["server_context"] = TEST_SERVER_CONTEXT
+
+        # Allow guests to be registered
+        config["allow_guest_access"] = True
+
+        hs = self.setup_test_homeserver(config=config)
+
+        # Replace the proxied http client with a mock, so we can inspect outbound requests to
+        # the configured stats endpoint.
+        self.put_json_mock = AsyncMock(return_value={})
+        hs.get_proxied_http_client().put_json = self.put_json_mock  # type: ignore[method-assign]
+        return hs
+
+    def prepare(
+        self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer
+    ) -> None:
+        self.store = homeserver.get_datastores().main
+
+        # Wait for the background updates to add the database triggers that keep the
+        # `event_stats` table up-to-date.
+        self.wait_for_background_updates()
+
+        # Force stats reporting to occur
+        start_phone_stats_home(hs=homeserver)
+
+        super().prepare(reactor, clock, homeserver)
+
+    def _get_latest_phone_home_stats(self) -> JsonDict:
+        # Wait for `phone_stats_home` to be called again + a healthy margin (50s).
+        self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50)
+
+        # Extract the reported stats from our http client mock
+        mock_calls = self.put_json_mock.call_args_list
+        report_stats_calls = []
+        for call in mock_calls:
+            if call.args[0] == TEST_REPORT_STATS_ENDPOINT:
+                report_stats_calls.append(call)
+
+        self.assertGreaterEqual(
+            (len(report_stats_calls)),
+            1,
+            "Expected at least one call to the report_stats endpoint",
+        )
+
+        # Extract the phone home stats from the call
+        phone_home_stats = report_stats_calls[0].args[1]
+
+        return phone_home_stats
+
+    def _perform_user_actions(self) -> None:
+        """
+        Perform some actions on the homeserver that would bump the phone home
+        stats.
+
+        This creates a few users (including a guest), a room, and sends some messages.
+ Expected number of events: + - 10 unencrypted messages + - 5 encrypted messages + - 24 total events (including room state, etc) + """ + + # Create some users + user_1_mxid = self.register_user( + username="test_user_1", + password="test", + ) + user_2_mxid = self.register_user( + username="test_user_2", + password="test", + ) + # Note: `self.register_user` does not support guest registration, and updating the + # Admin API it calls to add a new parameter would cause the `mac` parameter to fail + # in a backwards-incompatible manner. Hence, we make a manual request here. + _guest_user_mxid = self.make_request( + method="POST", + path="/_matrix/client/v3/register?kind=guest", + content={ + "username": "guest_user", + "password": "test", + }, + shorthand=False, + ) + + # Log in to each user + user_1_token = self.login(username=user_1_mxid, password="test") + user_2_token = self.login(username=user_2_mxid, password="test") + + # Create a room between the two users + room_1_id = self.helper.create_room_as( + is_public=False, + tok=user_1_token, + ) + + # Mark this room as end-to-end encrypted + self.helper.send_state( + room_id=room_1_id, + event_type="m.room.encryption", + body={ + "algorithm": "m.megolm.v1.aes-sha2", + "rotation_period_ms": 604800000, + "rotation_period_msgs": 100, + }, + state_key="", + tok=user_1_token, + ) + + # User 1 invites user 2 + self.helper.invite( + room=room_1_id, + src=user_1_mxid, + targ=user_2_mxid, + tok=user_1_token, + ) + + # User 2 joins + self.helper.join( + room=room_1_id, + user=user_2_mxid, + tok=user_2_token, + ) + + # User 1 sends 10 unencrypted messages + for _ in range(10): + self.helper.send( + room_id=room_1_id, + body="Zoinks Scoob! A message!", + tok=user_1_token, + ) + + # User 2 sends 5 encrypted "messages" + for _ in range(5): + self.helper.send_event( + room_id=room_1_id, + type="m.room.encrypted", + content={ + "algorithm": "m.olm.v1.curve25519-aes-sha2", + "sender_key": "some_key", + "ciphertext": { + "some_key": { + "type": 0, + "body": "encrypted_payload", + }, + }, + }, + tok=user_2_token, + ) + + def test_phone_home_stats(self) -> None: + """ + Test that the phone home stats contain the stats we expect based on + the scenario carried out in `prepare` + """ + # Do things to bump the stats + self._perform_user_actions() + + # Wait for the stats to be reported + phone_home_stats = self._get_latest_phone_home_stats() + + self.assertEqual( + phone_home_stats["homeserver"], self.hs.config.server.server_name + ) + + self.assertTrue(isinstance(phone_home_stats["memory_rss"], int)) + self.assertTrue(isinstance(phone_home_stats["cpu_average"], int)) + + self.assertEqual(phone_home_stats["server_context"], TEST_SERVER_CONTEXT) + + self.assertTrue(isinstance(phone_home_stats["timestamp"], int)) + self.assertTrue(isinstance(phone_home_stats["uptime_seconds"], int)) + self.assertTrue(isinstance(phone_home_stats["python_version"], str)) + + # We expect only our test users to exist on the homeserver + self.assertEqual(phone_home_stats["total_users"], 3) + self.assertEqual(phone_home_stats["total_nonbridged_users"], 3) + self.assertEqual(phone_home_stats["daily_user_type_native"], 2) + self.assertEqual(phone_home_stats["daily_user_type_guest"], 1) + self.assertEqual(phone_home_stats["daily_user_type_bridged"], 0) + self.assertEqual(phone_home_stats["total_room_count"], 1) + self.assertEqual(phone_home_stats["daily_active_users"], 2) + self.assertEqual(phone_home_stats["monthly_active_users"], 2) + 
self.assertEqual(phone_home_stats["daily_active_rooms"], 1) + self.assertEqual(phone_home_stats["daily_active_e2ee_rooms"], 1) + self.assertEqual(phone_home_stats["daily_messages"], 10) + self.assertEqual(phone_home_stats["daily_e2ee_messages"], 5) + self.assertEqual(phone_home_stats["daily_sent_messages"], 10) + self.assertEqual(phone_home_stats["daily_sent_e2ee_messages"], 5) + + # Our users have not been around for >30 days, hence these are all 0. + self.assertEqual(phone_home_stats["r30v2_users_all"], 0) + self.assertEqual(phone_home_stats["r30v2_users_android"], 0) + self.assertEqual(phone_home_stats["r30v2_users_ios"], 0) + self.assertEqual(phone_home_stats["r30v2_users_electron"], 0) + self.assertEqual(phone_home_stats["r30v2_users_web"], 0) + self.assertEqual( + phone_home_stats["cache_factor"], self.hs.config.caches.global_factor + ) + self.assertEqual( + phone_home_stats["event_cache_size"], + self.hs.config.caches.event_cache_size, + ) + self.assertEqual( + phone_home_stats["database_engine"], + self.hs.config.database.databases[0].config["name"], + ) + self.assertEqual( + phone_home_stats["database_server_version"], + self.hs.get_datastores().main.database_engine.server_version, + ) + + synapse_logger = logging.getLogger("synapse") + log_level = synapse_logger.getEffectiveLevel() + self.assertEqual(phone_home_stats["log_level"], logging.getLevelName(log_level)) diff --git a/tests/module_api/test_account_data_manager.py b/tests/module_api/test_account_data_manager.py
index fd87eaffd0..1a1d5609b2 100644 --- a/tests/module_api/test_account_data_manager.py +++ b/tests/module_api/test_account_data_manager.py
@@ -164,6 +164,8 @@ class ModuleApiTestCase(HomeserverTestCase): # noinspection PyTypeChecker self.get_success_or_raise( self._module_api.account_data_manager.put_global( - self.user_id, "test.data", 42 # type: ignore[arg-type] + self.user_id, + "test.data", + 42, # type: ignore[arg-type] ) ) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index b6ba472d7d..85bd76b78f 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py
@@ -87,7 +87,7 @@ class ModuleApiTestCase(BaseModuleApiTestCase): # Register a new user user_id, access_token = self.get_success( self.module_api.register( - "bob", displayname="Bobberino", emails=["bob@bobinator.bob"] + "bob", displayname="Bobberino" ) ) @@ -96,18 +96,6 @@ class ModuleApiTestCase(BaseModuleApiTestCase): self.assertTrue(access_token) self.assertTrue(self.get_success(self.store.get_user_by_id(user_id))) - # Check that the email was assigned - emails = self.get_success(self.store.user_get_threepids(user_id)) - self.assertEqual(len(emails), 1) - - email = emails[0] - self.assertEqual(email.medium, "email") - self.assertEqual(email.address, "bob@bobinator.bob") - - # Should these be 0? - self.assertEqual(email.validated_at, 0) - self.assertEqual(email.added_at, 0) - # Check that the displayname was assigned displayname = self.get_success( self.store.get_profile_displayname(UserID.from_string("@bob:test")) diff --git a/tests/module_api/test_spamchecker.py b/tests/module_api/test_spamchecker.py new file mode 100644
index 0000000000..926fe30b43 --- /dev/null +++ b/tests/module_api/test_spamchecker.py
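As context for the new tests: modules opt in to these checks via `register_spam_checker_callbacks`. A hypothetical module using the richer two-argument `user_may_create_room` form (class name and policy invented for illustration; the signature and registration call mirror the tests below):

```python
from typing import Literal, Union

from synapse.module_api import ModuleApi
from synapse.types import Codes, JsonDict


class TopicPolicyExample:  # hypothetical module, not part of this change
    def __init__(self, config: dict, api: ModuleApi):
        api.register_spam_checker_callbacks(
            user_may_create_room=self.user_may_create_room,
        )

    async def user_may_create_room(
        self, user_id: str, room_config: JsonDict
    ) -> Union[Literal["NOT_SPAM"], Codes]:
        # room_config is the /createRoom body; on a room upgrade it carries the
        # reconstructed config, including initial_state (see the tests below).
        if "spam" in str(room_config.get("topic", "")):
            return Codes.FORBIDDEN
        return "NOT_SPAM"
```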
@@ -0,0 +1,244 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# +from typing import Literal, Union + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.config.server import DEFAULT_ROOM_VERSION +from synapse.rest import admin, login, room, room_upgrade_rest_servlet +from synapse.server import HomeServer +from synapse.types import Codes, JsonDict +from synapse.util import Clock + +from tests.server import FakeChannel +from tests.unittest import HomeserverTestCase + + +class SpamCheckerTestCase(HomeserverTestCase): + servlets = [ + room.register_servlets, + admin.register_servlets, + login.register_servlets, + room_upgrade_rest_servlet.register_servlets, + ] + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self._module_api = homeserver.get_module_api() + self.user_id = self.register_user("user", "password") + self.token = self.login("user", "password") + + def create_room(self, content: JsonDict) -> FakeChannel: + channel = self.make_request( + "POST", + "/_matrix/client/r0/createRoom", + content, + access_token=self.token, + ) + + return channel + + def test_may_user_create_room(self) -> None: + """Test that the may_user_create_room callback is called when a user + creates a room, and that it receives the correct parameters. + """ + + async def user_may_create_room( + user_id: str, room_config: JsonDict + ) -> Union[Literal["NOT_SPAM"], Codes]: + self.last_room_config = room_config + self.last_user_id = user_id + return "NOT_SPAM" + + self._module_api.register_spam_checker_callbacks( + user_may_create_room=user_may_create_room + ) + + channel = self.create_room({"foo": "baa"}) + self.assertEqual(channel.code, 200) + self.assertEqual(self.last_user_id, self.user_id) + self.assertEqual(self.last_room_config["foo"], "baa") + + def test_may_user_create_room_on_upgrade(self) -> None: + """Test that the may_user_create_room callback is called when a room is upgraded.""" + + # First, create a room to upgrade. + channel = self.create_room({"topic": "foo"}) + self.assertEqual(channel.code, 200) + room_id = channel.json_body["room_id"] + + async def user_may_create_room( + user_id: str, room_config: JsonDict + ) -> Union[Literal["NOT_SPAM"], Codes]: + self.last_room_config = room_config + self.last_user_id = user_id + return "NOT_SPAM" + + # Register the callback for spam checking. + self._module_api.register_spam_checker_callbacks( + user_may_create_room=user_may_create_room + ) + + # Now upgrade the room. + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/upgrade", + # This will upgrade a room to the same version, but that's fine. + content={"new_version": DEFAULT_ROOM_VERSION}, + access_token=self.token, + ) + + # Check that the callback was called and the room was upgraded. + self.assertEqual(channel.code, 200) + self.assertEqual(self.last_user_id, self.user_id) + # Check that the initial state received by callback contains the topic event. 
+ self.assertTrue( + any( + event[0][0] == "m.room.topic" and event[1].get("topic") == "foo" + for event in self.last_room_config["initial_state"] + ) + ) + + def test_may_user_create_room_disallowed(self) -> None: + """Test that the codes response from may_user_create_room callback is respected + and returned via the API. + """ + + async def user_may_create_room( + user_id: str, room_config: JsonDict + ) -> Union[Literal["NOT_SPAM"], Codes]: + self.last_room_config = room_config + self.last_user_id = user_id + return Codes.UNAUTHORIZED + + self._module_api.register_spam_checker_callbacks( + user_may_create_room=user_may_create_room + ) + + channel = self.create_room({"foo": "baa"}) + self.assertEqual(channel.code, 403) + self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) + self.assertEqual(self.last_user_id, self.user_id) + self.assertEqual(self.last_room_config["foo"], "baa") + + def test_may_user_create_room_compatibility(self) -> None: + """Test that the may_user_create_room callback is called when a user + creates a room for a module that uses the old callback signature + (without the `room_config` parameter) + """ + + async def user_may_create_room( + user_id: str, + ) -> Union[Literal["NOT_SPAM"], Codes]: + self.last_user_id = user_id + return "NOT_SPAM" + + self._module_api.register_spam_checker_callbacks( + user_may_create_room=user_may_create_room + ) + + channel = self.create_room({"foo": "baa"}) + self.assertEqual(channel.code, 200) + self.assertEqual(self.last_user_id, self.user_id) + + def test_user_may_send_state_event(self) -> None: + """Test that the user_may_send_state_event callback is called when a state event + is sent, and that it receives the correct parameters. + """ + + async def user_may_send_state_event( + user_id: str, + room_id: str, + event_type: str, + state_key: str, + content: JsonDict, + ) -> Union[Literal["NOT_SPAM"], Codes]: + self.last_user_id = user_id + self.last_room_id = room_id + self.last_event_type = event_type + self.last_state_key = state_key + self.last_content = content + return "NOT_SPAM" + + self._module_api.register_spam_checker_callbacks( + user_may_send_state_event=user_may_send_state_event + ) + + channel = self.create_room({}) + self.assertEqual(channel.code, 200) + + room_id = channel.json_body["room_id"] + + event_type = "test.event.type" + state_key = "test.state.key" + channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/%s/%s" + % ( + room_id, + event_type, + state_key, + ), + content={"foo": "bar"}, + access_token=self.token, + ) + + self.assertEqual(channel.code, 200) + self.assertEqual(self.last_user_id, self.user_id) + self.assertEqual(self.last_room_id, room_id) + self.assertEqual(self.last_event_type, event_type) + self.assertEqual(self.last_state_key, state_key) + self.assertEqual(self.last_content, {"foo": "bar"}) + + def test_user_may_send_state_event_disallows(self) -> None: + """Test that the user_may_send_state_event callback is called when a state event + is sent, and that the response is honoured. 
+ """ + + async def user_may_send_state_event( + user_id: str, + room_id: str, + event_type: str, + state_key: str, + content: JsonDict, + ) -> Union[Literal["NOT_SPAM"], Codes]: + return Codes.FORBIDDEN + + self._module_api.register_spam_checker_callbacks( + user_may_send_state_event=user_may_send_state_event + ) + + channel = self.create_room({}) + self.assertEqual(channel.code, 200) + + room_id = channel.json_body["room_id"] + + event_type = "test.event.type" + state_key = "test.state.key" + channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/%s/%s" + % ( + room_id, + event_type, + state_key, + ), + content={"foo": "bar"}, + access_token=self.token, + ) + + self.assertEqual(channel.code, 403) + self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py
index fc73f3dc2a..16c1292812 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py
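The hunk below is purely mechanical: Python 3.10 made parenthesised context managers official syntax, so long `with` statements can wrap without nesting or backslashes. Behaviour is unchanged, for example:

```python
from contextlib import nullcontext

# Equivalent to `with nullcontext("a") as a, nullcontext("b") as b:`,
# just wrapped more readably (requires Python 3.10+).
with (
    nullcontext("a") as a,
    nullcontext("b") as b,
):
    print(a, b)
```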
@@ -120,9 +120,11 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): # # We have seen stringy and null values for "room" in the wild, so presumably # some of this validation was missing in the past. - with patch("synapse.events.validator.validate_canonicaljson"), patch( - "synapse.events.validator.jsonschema.validate" - ), patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"): + with ( + patch("synapse.events.validator.validate_canonicaljson"), + patch("synapse.events.validator.jsonschema.validate"), + patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"), + ): pl_event_id = self.helper.send_state( self.room_id, "m.room.power_levels", diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index e0aab1c046..66ee6ca20f 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py
@@ -31,9 +31,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.errors import Codes, SynapseError -from synapse.push.emailpusher import EmailPusher from synapse.rest.client import login, room -from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource from synapse.server import HomeServer from synapse.util import Clock @@ -44,524 +42,6 @@ from tests.unittest import HomeserverTestCase @attr.s(auto_attribs=True) class _User: "Helper wrapper for user ID and access token" + id: str token: str - - -class EmailPusherTests(HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets_for_client_rest_resource, - room.register_servlets, - login.register_servlets, - ] - hijack_auth = False - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() - config["email"] = { - "enable_notifs": True, - "template_dir": os.path.abspath( - pkg_resources.resource_filename("synapse", "res/templates") - ), - "expiry_template_html": "notice_expiry.html", - "expiry_template_text": "notice_expiry.txt", - "notif_template_html": "notif_mail.html", - "notif_template_text": "notif_mail.txt", - "smtp_host": "127.0.0.1", - "smtp_port": 20, - "require_transport_security": False, - "smtp_user": None, - "smtp_pass": None, - "app_name": "Matrix", - "notif_from": "test@example.com", - "riot_base_url": None, - } - config["public_baseurl"] = "http://aaa" - - hs = self.setup_test_homeserver(config=config) - - # List[Tuple[Deferred, args, kwargs]] - self.email_attempts: List[Tuple[Deferred, Sequence, Dict]] = [] - - def sendmail(*args: Any, **kwargs: Any) -> Deferred: - # This mocks out synapse.reactor.send_email._sendmail. - d: Deferred = Deferred() - self.email_attempts.append((d, args, kwargs)) - return d - - hs.get_send_email_handler()._sendmail = sendmail # type: ignore[assignment] - - return hs - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # Register the user who gets notified - self.user_id = self.register_user("user", "pass") - self.access_token = self.login("user", "pass") - - # Register other users - self.others = [ - _User( - id=self.register_user("otheruser1", "pass"), - token=self.login("otheruser1", "pass"), - ), - _User( - id=self.register_user("otheruser2", "pass"), - token=self.login("otheruser2", "pass"), - ), - ] - - # Register the pusher - user_tuple = self.get_success( - self.hs.get_datastores().main.get_user_by_access_token(self.access_token) - ) - assert user_tuple is not None - self.device_id = user_tuple.device_id - - # We need to add email to account before we can create a pusher. - self.get_success( - hs.get_datastores().main.user_add_threepid( - self.user_id, "email", "a@example.com", 0, 0 - ) - ) - - pusher = self.get_success( - self.hs.get_pusherpool().add_or_update_pusher( - user_id=self.user_id, - device_id=self.device_id, - kind="email", - app_id="m.email", - app_display_name="Email Notifications", - device_display_name="a@example.com", - pushkey="a@example.com", - lang=None, - data={}, - ) - ) - assert isinstance(pusher, EmailPusher) - self.pusher = pusher - - self.auth_handler = hs.get_auth_handler() - self.store = hs.get_datastores().main - - def test_need_validated_email(self) -> None: - """Test that we can only add an email pusher if the user has validated - their email. 
- """ - with self.assertRaises(SynapseError) as cm: - self.get_success_or_raise( - self.hs.get_pusherpool().add_or_update_pusher( - user_id=self.user_id, - device_id=self.device_id, - kind="email", - app_id="m.email", - app_display_name="Email Notifications", - device_display_name="b@example.com", - pushkey="b@example.com", - lang=None, - data={}, - ) - ) - - self.assertEqual(400, cm.exception.code) - self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode) - - def test_simple_sends_email(self) -> None: - # Create a simple room with two users - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id - ) - self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) - - # The other user sends a single message. - self.helper.send(room, body="Hi!", tok=self.others[0].token) - - # We should get emailed about that message - self._check_for_mail() - - # The other user sends multiple messages. - self.helper.send(room, body="Hi!", tok=self.others[0].token) - self.helper.send(room, body="There!", tok=self.others[0].token) - - self._check_for_mail() - - @parameterized.expand([(False,), (True,)]) - def test_unsubscribe(self, use_post: bool) -> None: - # Create a simple room with two users - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id - ) - self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) - - # The other user sends a single message. - self.helper.send(room, body="Hi!", tok=self.others[0].token) - - # We should get emailed about that message - args, kwargs = self._check_for_mail() - - # That email should contain an unsubscribe link in the body and header. - msg: bytes = args[5] - - # Multipart: plain text, base 64 encoded; html, base 64 encoded - multipart_msg = email.message_from_bytes(msg) - - # Extract the text (non-HTML) portion of the multipart Message, - # as a Message. - txt_message = multipart_msg.get_payload(i=0) - assert isinstance(txt_message, email.message.Message) - - # Extract the actual bytes from the Message object, and decode them to a `str`. - txt_bytes = txt_message.get_payload(decode=True) - assert isinstance(txt_bytes, bytes) - txt = txt_bytes.decode() - - # Do the same for the HTML portion of the multipart Message. - html_message = multipart_msg.get_payload(i=1) - assert isinstance(html_message, email.message.Message) - html_bytes = html_message.get_payload(decode=True) - assert isinstance(html_bytes, bytes) - html = html_bytes.decode() - - self.assertIn("/_synapse/client/unsubscribe", txt) - self.assertIn("/_synapse/client/unsubscribe", html) - - # The unsubscribe headers should exist. - assert multipart_msg.get("List-Unsubscribe") is not None - self.assertIsNotNone(multipart_msg.get("List-Unsubscribe-Post")) - - # Open the unsubscribe link. - unsubscribe_link = multipart_msg["List-Unsubscribe"].strip("<>") - unsubscribe_resource = UnsubscribeResource(self.hs) - channel = make_request( - self.reactor, - FakeSite(unsubscribe_resource, self.reactor), - "POST" if use_post else "GET", - unsubscribe_link, - shorthand=False, - ) - self.assertEqual(HTTPStatus.OK, channel.code, channel.result) - - # Ensure the pusher was removed. 
- pushers = list( - self.get_success( - self.hs.get_datastores().main.get_pushers_by( - {"user_name": self.user_id} - ) - ) - ) - self.assertEqual(pushers, []) - - def test_invite_sends_email(self) -> None: - # Create a room and invite the user to it - room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) - self.helper.invite( - room=room, - src=self.others[0].id, - tok=self.others[0].token, - targ=self.user_id, - ) - - # We should get emailed about the invite - self._check_for_mail() - - def test_invite_to_empty_room_sends_email(self) -> None: - # Create a room and invite the user to it - room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) - self.helper.invite( - room=room, - src=self.others[0].id, - tok=self.others[0].token, - targ=self.user_id, - ) - - # Then have the original user leave - self.helper.leave(room, self.others[0].id, tok=self.others[0].token) - - # We should get emailed about the invite - self._check_for_mail() - - def test_multiple_members_email(self) -> None: - # We want to test multiple notifications, so we pause processing of push - # while we send messages. - self.pusher._pause_processing() - - # Create a simple room with multiple other users - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - - for other in self.others: - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=other.id - ) - self.helper.join(room=room, user=other.id, tok=other.token) - - # The other users send some messages - self.helper.send(room, body="Hi!", tok=self.others[0].token) - self.helper.send(room, body="There!", tok=self.others[1].token) - self.helper.send(room, body="There!", tok=self.others[1].token) - - # Nothing should have happened yet, as we're paused. - assert not self.email_attempts - - self.pusher._resume_processing() - - # We should get emailed about those messages - self._check_for_mail() - - def test_multiple_rooms(self) -> None: - # We want to test multiple notifications from multiple rooms, so we pause - # processing of push while we send messages. - self.pusher._pause_processing() - - # Create a simple room with multiple other users - rooms = [ - self.helper.create_room_as(self.user_id, tok=self.access_token), - self.helper.create_room_as(self.user_id, tok=self.access_token), - ] - - for r, other in zip(rooms, self.others): - self.helper.invite( - room=r, src=self.user_id, tok=self.access_token, targ=other.id - ) - self.helper.join(room=r, user=other.id, tok=other.token) - - # The other users send some messages - self.helper.send(rooms[0], body="Hi!", tok=self.others[0].token) - self.helper.send(rooms[1], body="There!", tok=self.others[1].token) - self.helper.send(rooms[1], body="There!", tok=self.others[1].token) - - # Nothing should have happened yet, as we're paused. - assert not self.email_attempts - - self.pusher._resume_processing() - - # We should get emailed about those messages - self._check_for_mail() - - def test_room_notifications_include_avatar(self) -> None: - # Create a room and set its avatar. - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.send_state( - room, "m.room.avatar", {"url": "mxc://DUMMY_MEDIA_ID"}, self.access_token - ) - - # Invite two other uses. - for other in self.others: - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=other.id - ) - self.helper.join(room=room, user=other.id, tok=other.token) - - # The other users send some messages. 
- # TODO It seems that two messages are required to trigger an email? - self.helper.send(room, body="Alpha", tok=self.others[0].token) - self.helper.send(room, body="Beta", tok=self.others[1].token) - - # We should get emailed about those messages - args, kwargs = self._check_for_mail() - - # That email should contain the room's avatar - msg: bytes = args[5] - # Multipart: plain text, base 64 encoded; html, base 64 encoded - - # Extract the html Message object from the Multipart Message. - # We need the asserts to convince mypy that this is OK. - html_message = email.message_from_bytes(msg).get_payload(i=1) - assert isinstance(html_message, email.message.Message) - - # Extract the `bytes` from the html Message object, and decode to a `str`. - html = html_message.get_payload(decode=True) - assert isinstance(html, bytes) - html = html.decode() - - self.assertIn("_matrix/media/v1/thumbnail/DUMMY_MEDIA_ID", html) - - def test_empty_room(self) -> None: - """All users leaving a room shouldn't cause the pusher to break.""" - # Create a simple room with two users - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id - ) - self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) - - # The other user sends a single message. - self.helper.send(room, body="Hi!", tok=self.others[0].token) - - # Leave the room before the message is processed. - self.helper.leave(room, self.user_id, tok=self.access_token) - self.helper.leave(room, self.others[0].id, tok=self.others[0].token) - - # We should get emailed about that message - self._check_for_mail() - - def test_empty_room_multiple_messages(self) -> None: - """All users leaving a room shouldn't cause the pusher to break.""" - # Create a simple room with two users - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id - ) - self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) - - # The other user sends a single message. - self.helper.send(room, body="Hi!", tok=self.others[0].token) - self.helper.send(room, body="There!", tok=self.others[0].token) - - # Leave the room before the message is processed. - self.helper.leave(room, self.user_id, tok=self.access_token) - self.helper.leave(room, self.others[0].id, tok=self.others[0].token) - - # We should get emailed about that message - self._check_for_mail() - - def test_encrypted_message(self) -> None: - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.invite( - room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id - ) - self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) - - # The other user sends some messages - self.helper.send_event(room, "m.room.encrypted", {}, tok=self.others[0].token) - - # We should get emailed about that message - self._check_for_mail() - - def test_no_email_sent_after_removed(self) -> None: - # Create a simple room with two users - room = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.helper.invite( - room=room, - src=self.user_id, - tok=self.access_token, - targ=self.others[0].id, - ) - self.helper.join( - room=room, - user=self.others[0].id, - tok=self.others[0].token, - ) - - # The other user sends a single message. 
- self.helper.send(room, body="Hi!", tok=self.others[0].token) - - # We should get emailed about that message - self._check_for_mail() - - # disassociate the user's email address - self.get_success( - self.auth_handler.delete_local_threepid( - user_id=self.user_id, medium="email", address="a@example.com" - ) - ) - - # check that the pusher for that email address has been deleted - pushers = list( - self.get_success( - self.hs.get_datastores().main.get_pushers_by( - {"user_name": self.user_id} - ) - ) - ) - self.assertEqual(len(pushers), 0) - - def test_remove_unlinked_pushers_background_job(self) -> None: - """Checks that all existing pushers associated with unlinked email addresses are removed - upon running the remove_deleted_email_pushers background update. - """ - # disassociate the user's email address manually (without deleting the pusher). - # This resembles the old behaviour, which the background update below is intended - # to clean up. - self.get_success( - self.hs.get_datastores().main.user_delete_threepid( - self.user_id, "email", "a@example.com" - ) - ) - - # Run the "remove_deleted_email_pushers" background job - self.get_success( - self.hs.get_datastores().main.db_pool.simple_insert( - table="background_updates", - values={ - "update_name": "remove_deleted_email_pushers", - "progress_json": "{}", - "depends_on": None, - }, - ) - ) - - # ... and tell the DataStore that it hasn't finished all updates yet - self.hs.get_datastores().main.db_pool.updates._all_done = False - - # Now let's actually drive the updates to completion - self.wait_for_background_updates() - - # Check that all pushers with unlinked addresses were deleted - pushers = list( - self.get_success( - self.hs.get_datastores().main.get_pushers_by( - {"user_name": self.user_id} - ) - ) - ) - self.assertEqual(len(pushers), 0) - - def _check_for_mail(self) -> Tuple[Sequence, Dict]: - """ - Assert that synapse sent off exactly one email notification. - - Returns: - args and kwargs passed to synapse.reactor.send_email._sendmail for - that notification. - """ - # Get the stream ordering before it gets sent - pushers = list( - self.get_success( - self.hs.get_datastores().main.get_pushers_by( - {"user_name": self.user_id} - ) - ) - ) - self.assertEqual(len(pushers), 1) - last_stream_ordering = pushers[0].last_stream_ordering - - # Advance time a bit, so the pusher will register something has happened - self.pump(10) - - # It hasn't succeeded yet, so the stream ordering shouldn't have moved - pushers = list( - self.get_success( - self.hs.get_datastores().main.get_pushers_by( - {"user_name": self.user_id} - ) - ) - ) - self.assertEqual(len(pushers), 1) - self.assertEqual(last_stream_ordering, pushers[0].last_stream_ordering) - - # One email was attempted to be sent - self.assertEqual(len(self.email_attempts), 1) - - deferred, sendmail_args, sendmail_kwargs = self.email_attempts[0] - # Make the email succeed - deferred.callback(True) - self.pump() - - # One email was attempted to be sent - self.assertEqual(len(self.email_attempts), 1) - - # The stream ordering has increased - pushers = list( - self.get_success( - self.hs.get_datastores().main.get_pushers_by( - {"user_name": self.user_id} - ) - ) - ) - self.assertEqual(len(pushers), 1) - self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) - - # Reset the attempts. - self.email_attempts = [] - return sendmail_args, sendmail_kwargs diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index bcca472617..b42fd284b6 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py
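Ahead of the new MSC4076 tests: the behaviour under test hinges on one unstable flag in the pusher's `data` dict. A sketch of the shape involved (the flag and `format` key are taken from the tests below; everything else here is illustrative, not exhaustive):

```python
# Pusher data as constructed in the tests below.
push_data = {
    "url": "http://example.com/_matrix/push/v1/notify",
    "org.matrix.msc4076.disable_badge_count": True,  # suppress badge counts
    "format": "event_id_only",                       # optional slimmer payload
}
# With the flag set, the notification sent to the gateway omits "counts";
# without it, the tests expect e.g. {"counts": {"unread": 1}, ...}.
```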
@@ -17,9 +17,11 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import Any, List, Tuple
+from typing import Any, Dict, List, Tuple
 from unittest.mock import Mock
 
+from parameterized import parameterized
+
 from twisted.internet.defer import Deferred
 from twisted.test.proto_helpers import MemoryReactor
 
@@ -1085,3 +1087,161 @@ class HTTPPusherTests(HomeserverTestCase):
         self.pump()
 
         self.assertEqual(len(self.push_attempts), 11)
+
+    @parameterized.expand(
+        [
+            # Badge count disabled
+            (True, True),
+            (True, False),
+            # Badge count enabled
+            (False, True),
+            (False, False),
+        ]
+    )
+    @override_config({"experimental_features": {"msc4076_enabled": True}})
+    def test_msc4076_badge_count(
+        self, disable_badge_count: bool, event_id_only: bool
+    ) -> None:
+        # Register the user who gets notified
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Register the user who sends the message
+        other_user_id = self.register_user("otheruser", "pass")
+        other_access_token = self.login("otheruser", "pass")
+
+        # Register the pusher
+        user_tuple = self.get_success(
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
+        )
+        assert user_tuple is not None
+        device_id = user_tuple.device_id
+
+        # Set the push data dict based on test input parameters
+        push_data: Dict[str, Any] = {
+            "url": "http://example.com/_matrix/push/v1/notify",
+        }
+        if disable_badge_count:
+            push_data["org.matrix.msc4076.disable_badge_count"] = True
+        if event_id_only:
+            push_data["format"] = "event_id_only"
+
+        self.get_success(
+            self.hs.get_pusherpool().add_or_update_pusher(
+                user_id=user_id,
+                device_id=device_id,
+                kind="http",
+                app_id="m.http",
+                app_display_name="HTTP Push Notifications",
+                device_display_name="pushy push",
+                pushkey="a@example.com",
+                lang=None,
+                data=push_data,
+            )
+        )
+
+        # Create a room
+        room = self.helper.create_room_as(user_id, tok=access_token)
+
+        # The other user joins
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # The other user sends a message
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+
+        # Advance time a bit, so the pusher will register something has happened
+        self.pump()
+
+        # One push was attempted to be sent
+        self.assertEqual(len(self.push_attempts), 1)
+        self.assertEqual(
+            self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify"
+        )
+
+        if disable_badge_count:
+            # Verify that the notification DOESN'T contain a counts field
+            self.assertNotIn("counts", self.push_attempts[0][2]["notification"])
+        else:
+            # Ensure that the notification DOES contain a counts field
+            self.assertIn("counts", self.push_attempts[0][2]["notification"])
+            self.assertEqual(
+                self.push_attempts[0][2]["notification"]["counts"]["unread"], 1
+            )
+
+    def test_push_backoff(self) -> None:
+        """
+        The HTTP pusher will back off correctly if it fails to contact the push gateway.
+ """ + + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastores().main.get_user_by_access_token(access_token) + ) + assert user_tuple is not None + device_id = user_tuple.device_id + + self.get_success( + self.hs.get_pusherpool().add_or_update_pusher( + user_id=user_id, + device_id=device_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data={"url": "http://example.com/_matrix/push/v1/notify"}, + ) + ) + + # Create a room with the other user + room = self.helper.create_room_as(user_id, tok=access_token) + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends some messages + self.helper.send(room, body="Message 1", tok=other_access_token) + + # One push was attempted to be sent + self.assertEqual(len(self.push_attempts), 1) + self.assertEqual( + self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify" + ) + self.assertEqual( + self.push_attempts[0][2]["notification"]["content"]["body"], "Message 1" + ) + self.push_attempts[0][0].callback({}) + self.pump() + + # Send another message, this time it fails + self.helper.send(room, body="Message 2", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 2) + self.push_attempts[1][0].errback(Exception("couldn't connect")) + self.pump() + + # Sending yet another message doesn't trigger a push immediately + self.helper.send(room, body="Message 3", tok=other_access_token) + self.pump() + self.assertEqual(len(self.push_attempts), 2) + + # .. but waiting for a bit will cause more pushes + self.reactor.advance(10) + self.assertEqual(len(self.push_attempts), 3) + self.assertEqual( + self.push_attempts[2][2]["notification"]["content"]["body"], "Message 2" + ) + self.push_attempts[2][0].callback({}) + self.pump() + + self.assertEqual(len(self.push_attempts), 4) + self.assertEqual( + self.push_attempts[3][2]["notification"]["content"]["body"], "Message 3" + ) + self.push_attempts[3][0].callback({}) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index 420fbea998..3898532acf 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py
@@ -149,6 +149,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): content: JsonMapping, *, related_events: Optional[JsonDict] = None, + msc4210: bool = False, ) -> PushRuleEvaluator: event = FrozenEvent( { @@ -174,6 +175,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, + msc4210_enabled=msc4210, ) def test_display_name(self) -> None: @@ -452,6 +454,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): {"value": False}, "incorrect values should not match", ) + value: Any for value in ("foobaz", 1, 1.1, None, [], {}): self._assert_not_matches( condition, @@ -492,6 +495,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): {"value": None}, "exact value should match", ) + value: Any for value in ("foobaz", True, False, 1, 1.1, [], {}): self._assert_not_matches( condition, diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py
index 2eaad3707a..31d3163c01 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py
@@ -46,7 +46,7 @@ class CancellableReplicationEndpoint(ReplicationEndpoint): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload() -> JsonDict: + async def _serialize_payload(**kwargs: ReplicationEndpoint) -> JsonDict: return {} @cancellable @@ -68,7 +68,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload() -> JsonDict: + async def _serialize_payload(**kwargs: ReplicationEndpoint) -> JsonDict: return {} async def _handle_request( # type: ignore[override] diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
index fdc74efb5a..2a0189a4e1 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py
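The one-line change below is a drop-in equivalence: with an immutable fill value, `dict.fromkeys` builds the same mapping as the comprehension it replaces.

```python
user_ids = ["@alice:test", "@bob:test"]
assert {u: 50 for u in user_ids} == dict.fromkeys(user_ids, 50)
# General caveat (harmless here, since 50 is immutable): dict.fromkeys shares
# a single value object across every key, so a mutable default would alias.
```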
@@ -324,7 +324,7 @@ class EventsStreamTestCase(BaseStreamTestCase): pls = self.helper.get_state( self.room_id, EventTypes.PowerLevels, tok=self.user_tok ) - pls["users"].update({u: 50 for u in user_ids}) + pls["users"].update(dict.fromkeys(user_ids, 50)) self.helper.send_state( self.room_id, EventTypes.PowerLevels, diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 4429d0f4e2..58a7a9dc72 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py
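The refactor below drops `EventBuilderFactory` in favour of hand-building and signing event dicts. For reference, a self-contained sketch of the `signedjson` primitives the new helper leans on (standalone example, not Synapse code):

```python
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json

signing_key = generate_signing_key("test")  # key version "test"
signed = sign_json(
    {"type": "m.room.member", "content": {"membership": "join"}},
    "other_server",  # signature_name: the server vouching for this JSON
    signing_key,
)
# Raises if the signature does not verify against the public key.
verify_signed_json(signed, "other_server", get_verify_key(signing_key))
```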
@@ -22,14 +22,26 @@ import logging from unittest.mock import AsyncMock, Mock from netaddr import IPSet +from signedjson.key import ( + encode_verify_key_base64, + generate_signing_key, + get_verify_key, +) + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, Membership -from synapse.events.builder import EventBuilderFactory +from synapse.api.room_versions import RoomVersion +from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.events import EventBase, make_event_from_dict from synapse.handlers.typing import TypingWriterHandler from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client import login, room -from synapse.types import UserID, create_requester +from synapse.server import HomeServer +from synapse.storage.keys import FetchKeyResult +from synapse.types import JsonDict, UserID, create_requester +from synapse.util import Clock from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import get_clock @@ -63,6 +75,9 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): ip_blocklist=IPSet(), ) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.storage_controllers = hs.get_storage_controllers() + def test_send_event_single_sender(self) -> None: """Test that using a single federation sender worker correctly sends a new event. @@ -243,35 +258,92 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): self.assertTrue(sent_on_1) self.assertTrue(sent_on_2) + def create_fake_event_from_remote_server( + self, remote_server_name: str, event_dict: JsonDict, room_version: RoomVersion + ) -> EventBase: + """ + This is similar to what `FederatingHomeserverTestCase` is doing but we don't + need all of the extra baggage and we want to be able to create an event from + many remote servers. 
+ """ + + # poke the other server's signing key into the key store, so that we don't + # make requests for it + other_server_signature_key = generate_signing_key("test") + verify_key = get_verify_key(other_server_signature_key) + verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version) + + self.get_success( + self.hs.get_datastores().main.store_server_keys_response( + remote_server_name, + from_server=remote_server_name, + ts_added_ms=self.clock.time_msec(), + verify_keys={ + verify_key_id: FetchKeyResult( + verify_key=verify_key, + valid_until_ts=self.clock.time_msec() + 10000, + ), + }, + response_json={ + "verify_keys": { + verify_key_id: {"key": encode_verify_key_base64(verify_key)} + } + }, + ) + ) + + add_hashes_and_signatures( + room_version=room_version, + event_dict=event_dict, + signature_name=remote_server_name, + signing_key=other_server_signature_key, + ) + event = make_event_from_dict( + event_dict, + room_version=room_version, + ) + + return event + def create_room_with_remote_server( self, user: str, token: str, remote_server: str = "other_server" ) -> str: - room = self.helper.create_room_as(user, tok=token) + room_id = self.helper.create_room_as(user, tok=token) store = self.hs.get_datastores().main federation = self.hs.get_federation_event_handler() - prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room)) - room_version = self.get_success(store.get_room_version(room)) + room_version = self.get_success(store.get_room_version(room_id)) - factory = EventBuilderFactory(self.hs) - factory.hostname = remote_server + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + + # Figure out what the forward extremities in the room are (the most recent + # events that aren't tied into the DAG) + prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room_id)) user_id = UserID("user", remote_server).to_string() - event_dict = { - "type": EventTypes.Member, - "state_key": user_id, - "content": {"membership": Membership.JOIN}, - "sender": user_id, - "room_id": room, - } - - builder = factory.for_room_version(room_version, event_dict) - join_event = self.get_success( - builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None) + join_event = self.create_fake_event_from_remote_server( + remote_server_name=remote_server, + event_dict={ + "room_id": room_id, + "sender": user_id, + "type": EventTypes.Member, + "state_key": user_id, + "depth": 1000, + "origin_server_ts": 1, + "content": {"membership": Membership.JOIN}, + "auth_events": [ + state_map[(EventTypes.Create, "")].event_id, + state_map[(EventTypes.JoinRules, "")].event_id, + ], + "prev_events": list(prev_event_ids), + }, + room_version=room_version, ) self.get_success(federation.on_send_membership_event(remote_server, join_event)) self.replicate() - return room + return room_id diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py
index 6fc4600c41..f36af877c4 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py
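The overrides added below pin the legacy behaviour: these tests fetch remote media via the unauthenticated `/_matrix/media/v3` path, which is no longer served once authenticated media is enabled (now the default). The pattern, assuming the same test scaffolding as the file below:

```python
# Assumed sketch: per-test override so the legacy download path stays testable.
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.unittest import override_config

class ExampleMediaShardTest(BaseMultiWorkerStreamTestCase):
    @override_config({"enable_authenticated_media": False})
    def test_fetch_remote_media(self) -> None:
        ...  # exercise /_matrix/media/v3/download/... as before
```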
@@ -40,6 +40,7 @@ from tests.http import ( from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeChannel, FakeTransport, make_request from tests.test_utils import SMALL_PNG +from tests.unittest import override_config logger = logging.getLogger(__name__) @@ -148,6 +149,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): return channel, request + @override_config({"enable_authenticated_media": False}) def test_basic(self) -> None: """Test basic fetching of remote media from a single worker.""" hs1 = self.make_worker_hs("synapse.app.generic_worker") @@ -164,6 +166,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): self.assertEqual(channel.code, 200) self.assertEqual(channel.result["body"], b"Hello!") + @override_config({"enable_authenticated_media": False}) def test_download_simple_file_race(self) -> None: """Test that fetching remote media from two different processes at the same time works. @@ -203,6 +206,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): # We expect only one new file to have been persisted. self.assertEqual(start_count + 1, self._count_remote_media()) + @override_config({"enable_authenticated_media": False}) def test_download_image_race(self) -> None: """Test that fetching remote *images* from two different processes at the same time works. diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 6351326fff..fc2a6c569b 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py
@@ -20,7 +20,7 @@ # import urllib.parse -from typing import Dict +from typing import Dict, cast from parameterized import parameterized @@ -30,8 +30,9 @@ from twisted.web.resource import Resource import synapse.rest.admin from synapse.http.server import JsonResource from synapse.rest.admin import VersionServlet -from synapse.rest.client import login, room +from synapse.rest.client import login, media, room from synapse.server import HomeServer +from synapse.types import UserID from synapse.util import Clock from tests import unittest @@ -60,6 +61,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, login.register_servlets, + media.register_servlets, room.register_servlets, ] @@ -74,7 +76,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): """Ensure a piece of media is quarantined when trying to access it.""" channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_and_media_id}", + f"/_matrix/client/v1/media/download/{server_and_media_id}", shorthand=False, access_token=admin_user_tok, ) @@ -131,7 +133,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): # Attempt to access the media channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_name_and_media_id}", + f"/_matrix/client/v1/media/download/{server_name_and_media_id}", shorthand=False, access_token=non_admin_user_tok, ) @@ -226,10 +228,25 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): # Upload some media response_1 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) response_2 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) + response_3 = self.helper.upload_media(SMALL_PNG, tok=non_admin_user_tok) # Extract media IDs server_and_media_id_1 = response_1["content_uri"][6:] server_and_media_id_2 = response_2["content_uri"][6:] + server_and_media_id_3 = response_3["content_uri"][6:] + + # Remove the hash from the media to simulate historic media. + self.get_success( + self.hs.get_datastores().main.update_local_media( + media_id=server_and_media_id_3.split("/")[1], + media_type="image/png", + upload_name=None, + media_length=123, + user_id=UserID.from_string(non_admin_user), + # Hack to force some media to have no hash. + sha256=cast(str, None), + ) + ) # Quarantine all media by this user url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote( @@ -243,12 +260,13 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): self.pump(1.0) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual( - channel.json_body, {"num_quarantined": 2}, "Expected 2 quarantined items" + channel.json_body, {"num_quarantined": 3}, "Expected 3 quarantined items" ) # Attempt to access each piece of media self._ensure_quarantined(admin_user_tok, server_and_media_id_1) self._ensure_quarantined(admin_user_tok, server_and_media_id_2) + self._ensure_quarantined(admin_user_tok, server_and_media_id_3) def test_cannot_quarantine_safe_media(self) -> None: self.register_user("user_admin", "pass", admin=True) @@ -295,7 +313,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): # Attempt to access each piece of media channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_and_media_id_2}", + f"/_matrix/client/v1/media/download/{server_and_media_id_2}", shorthand=False, access_token=non_admin_user_tok, ) diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py
index a88c77bd19..531162a6e9 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py
@@ -27,7 +27,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.errors import Codes from synapse.handlers.device import DeviceHandler -from synapse.rest.client import login +from synapse.rest.client import devices, login from synapse.server import HomeServer from synapse.util import Clock @@ -299,6 +299,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): class DevicesRestTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, + devices.register_servlets, login.register_servlets, ] @@ -390,15 +391,63 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["devices"])) + @unittest.override_config( + {"experimental_features": {"msc2697_enabled": False, "msc3814_enabled": True}} + ) def test_get_devices(self) -> None: """ Tests that a normal lookup for devices is successfully """ # Create devices number_devices = 5 - for _ in range(number_devices): + # we create 2 fewer devices in the loop, because we will create another + # login after the loop, and we will create a dehydrated device + for _ in range(number_devices - 2): self.login("user", "pass") + other_user_token = self.login("user", "pass") + dehydrated_device_url = ( + "/_matrix/client/unstable/org.matrix.msc3814.v1/dehydrated_device" + ) + content = { + "device_data": { + "algorithm": "m.dehydration.v1.olm", + }, + "device_id": "dehydrated_device", + "initial_device_display_name": "foo bar", + "device_keys": { + "user_id": "@user:test", + "device_id": "dehydrated_device", + "valid_until_ts": "80", + "algorithms": [ + "m.olm.curve25519-aes-sha2", + ], + "keys": { + "<algorithm>:<device_id>": "<key_base64>", + }, + "signatures": { + "@user:test": {"<algorithm>:<device_id>": "<signature_base64>"} + }, + }, + "fallback_keys": { + "alg1:device1": "f4llb4ckk3y", + "signed_<algorithm>:<device_id>": { + "fallback": "true", + "key": "f4llb4ckk3y", + "signatures": { + "@user:test": {"<algorithm>:<device_id>": "<key_base64>"} + }, + }, + }, + "one_time_keys": {"alg1:k1": "0net1m3k3y"}, + } + self.make_request( + "PUT", + dehydrated_device_url, + access_token=other_user_token, + content=content, + ) + # Get devices channel = self.make_request( "GET", @@ -410,13 +459,22 @@ class DevicesRestTestCase(unittest.HomeserverTestCase): self.assertEqual(number_devices, channel.json_body["total"]) self.assertEqual(number_devices, len(channel.json_body["devices"])) self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"]) - # Check that all fields are available + # Check that all fields are available, and that the dehydrated device is marked as dehydrated + found_dehydrated = False for d in channel.json_body["devices"]: self.assertIn("user_id", d) self.assertIn("device_id", d) self.assertIn("display_name", d) self.assertIn("last_seen_ip", d) self.assertIn("last_seen_ts", d) + if d["device_id"] == "dehydrated_device": + self.assertTrue(d.get("dehydrated")) + found_dehydrated = True + else: + # Either the field is not present, or set to False + self.assertFalse(d.get("dehydrated")) + + self.assertTrue(found_dehydrated) class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py
index feb410a11d..6047ce1f4a 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py
@@ -378,6 +378,41 @@ class EventReportsTestCase(unittest.HomeserverTestCase): self.assertEqual(len(channel.json_body["event_reports"]), 1) self.assertNotIn("next_token", channel.json_body) + def test_filter_against_event_sender(self) -> None: + """ + Tests filtering by the sender of the reported event + """ + # first, grab all the reports + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(channel.code, 200) + + # build the set of report ids for events sent by one of the users + locally_filtered_report_ids = set() + for event_report in channel.json_body["event_reports"]: + if event_report["sender"] == self.other_user: + locally_filtered_report_ids.add(event_report["id"]) + + # grab the report ids filtered by sender and compare to the locally filtered set + channel = self.make_request( + "GET", + f"{self.url}?event_sender_user_id={self.other_user}", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) + self.assertEqual(channel.json_body["total"], len(locally_filtered_report_ids)) + + event_reports = channel.json_body["event_reports"] + server_filtered_report_ids = set() + for event_report in event_reports: + server_filtered_report_ids.add(event_report["id"]) + self.assertIncludes( + locally_filtered_report_ids, server_filtered_report_ids, exact=True + ) + def _create_event_and_report(self, room_id: str, user_tok: str) -> None: """Create and report events""" resp = self.helper.send(room_id, tok=user_tok) diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py
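# The new filter test above cross-checks the server-side event_sender_user_id
# filter against a local pass over the unfiltered listing; a self-contained
# sketch of that invariant (plain dicts stand in for event reports):
from typing import Iterable, Mapping, Set

def report_ids_for_sender(reports: Iterable[Mapping], sender: str) -> Set[int]:
    """Locally select the reports whose reported event was sent by `sender`."""
    return {r["id"] for r in reports if r["sender"] == sender}

# The filtered endpoint should return exactly this set, with "total" equal to its size.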
index c2015774a1..d5ae3345f5 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py
@@ -96,7 +96,7 @@ class FederationTestCase(unittest.HomeserverTestCase): self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - # unkown order_by + # unknown order_by channel = self.make_request( "GET", self.url + "?order_by=bar", diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index f378165513..da0e9749aa 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py
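# Several hunks below pin the legacy behaviour per test, since they exercise the
# unauthenticated download path; a minimal sketch of the override pattern (the
# servlets and test body are elided):
from tests import unittest
from tests.unittest import override_config

class LegacyMediaExampleTests(unittest.HomeserverTestCase):
    @override_config({"enable_authenticated_media": False})
    def test_via_legacy_endpoint(self) -> None:
        # The homeserver for this test alone is built with authenticated media
        # disabled, so /_matrix/media/v3/download/... keeps serving.
        ...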
@@ -35,7 +35,8 @@ from synapse.server import HomeServer from synapse.util import Clock from tests import unittest -from tests.test_utils import SMALL_PNG +from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG +from tests.unittest import override_config VALID_TIMESTAMP = 1609459200000 # 2021-01-01 in milliseconds INVALID_TIMESTAMP_IN_S = 1893456000 # 2030-01-01 in seconds @@ -126,6 +127,7 @@ class DeleteMediaByIDTestCase(_AdminMediaTests): self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only delete local media", channel.json_body["error"]) + @override_config({"enable_authenticated_media": False}) def test_delete_media(self) -> None: """ Tests that delete a media is successfully @@ -371,6 +373,7 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests): self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_date(self) -> None: """ Tests that media is not deleted if it is newer than `before_ts` @@ -408,6 +411,7 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests): self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_size(self) -> None: """ Tests that media is not deleted if its size is smaller than or equal @@ -443,6 +447,7 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests): self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_user_avatar(self) -> None: """ Tests that we do not delete media if is used as a user avatar @@ -487,6 +492,7 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests): self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_room_avatar(self) -> None: """ Tests that we do not delete media if it is used as a room avatar @@ -592,23 +598,27 @@ class DeleteMediaByDateSizeTestCase(_AdminMediaTests): class QuarantineMediaByIDTestCase(_AdminMediaTests): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - self.server_name = hs.hostname - - self.admin_user = self.register_user("admin", "pass", admin=True) - self.admin_user_tok = self.login("admin", "pass") - + def upload_media_and_return_media_id(self, data: bytes) -> str: # Upload some media into the room response = self.helper.upload_media( - SMALL_PNG, + data, tok=self.admin_user_tok, expect_code=200, ) # Extract media ID from the response server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' - self.media_id = server_and_media_id.split("/")[1] + return server_and_media_id.split("/")[1] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.server_name = hs.hostname + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + self.media_id = self.upload_media_and_return_media_id(SMALL_PNG) + self.media_id_2 = self.upload_media_and_return_media_id(SMALL_PNG) + self.media_id_3 = self.upload_media_and_return_media_id(SMALL_PNG) + self.media_id_other = self.upload_media_and_return_media_id(SMALL_CMYK_JPEG) self.url = "/_synapse/admin/v1/media/%s/%s/%s" @parameterized.expand(["quarantine", "unquarantine"]) @@ -680,6 +690,52 @@ class QuarantineMediaByIDTestCase(_AdminMediaTests): assert media_info is not None self.assertFalse(media_info.quarantined_by) + def 
test_quarantine_media_match_hash(self) -> None: + """ + Tests that quarantining a piece of media also quarantines all media with the same hash + """ + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + assert media_info is not None + self.assertFalse(media_info.quarantined_by) + + # quarantine the media + channel = self.make_request( + "POST", + self.url % ("quarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + # Test that ALL similar media was quarantined. + for media in [self.media_id, self.media_id_2, self.media_id_3]: + media_info = self.get_success(self.store.get_local_media(media)) + assert media_info is not None + self.assertTrue(media_info.quarantined_by) + + # Test that other media was not. + media_info = self.get_success(self.store.get_local_media(self.media_id_other)) + assert media_info is not None + self.assertFalse(media_info.quarantined_by) + + # remove from quarantine + channel = self.make_request( + "POST", + self.url % ("unquarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + # Test that ALL similar media is now reset. + for media in [self.media_id, self.media_id_2, self.media_id_3]: + media_info = self.get_success(self.store.get_local_media(media)) + assert media_info is not None + self.assertFalse(media_info.quarantined_by) + def test_quarantine_protected_media(self) -> None: """ Tests that quarantining from protected media fails diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
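# Quarantining by ID now also catches every other local item with the same
# sha256, so re-uploading identical bytes cannot dodge the quarantine (media
# with no stored hash, like the historic upload simulated in test_admin.py, is
# matched by ID only); a self-contained sketch of the fan-out (toy in-memory
# store, not Synapse's datastore API):
from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class MediaItem:
    media_id: str
    sha256: Optional[str]  # historic media may have no stored hash
    quarantined_by: Optional[str] = None

def quarantine_with_copies(store: Dict[str, MediaItem], media_id: str, admin: str) -> int:
    """Quarantine `media_id` plus every item sharing its hash; return how many were hit."""
    target = store[media_id]
    count = 0
    for item in store.values():
        same_hash = target.sha256 is not None and item.sha256 == target.sha256
        if item.media_id == media_id or same_hash:
            item.quarantined_by = admin
            count += 1
    return count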
index 95ed736451..e22dfcba1b 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py
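# The shutdown test in the hunks below asserts that only *joined* members are
# moved to the replacement room, while invitees are merely kicked; a
# self-contained sketch of that membership partition (plain dicts stand in for
# m.room.member state events):
from typing import Iterable, List, Mapping, Tuple

def partition_members(member_events: Iterable[Mapping]) -> Tuple[List[str], List[str]]:
    """Split m.room.member state events into (joined, invited) user ids."""
    joined: List[str] = []
    invited: List[str] = []
    for ev in member_events:
        membership = ev["content"].get("membership")
        if membership == "join":
            joined.append(ev["state_key"])
        elif membership == "invite":
            invited.append(ev["state_key"])
    return joined, invited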
@@ -369,6 +369,47 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self._is_blocked(room_id) + def test_invited_users_not_joined_to_new_room(self) -> None: + """ + Test that when a new room id is provided, users who are only invited + but have not joined original room are not moved to new room. + """ + invitee = self.register_user("invitee", "pass") + + self.helper.invite( + self.room_id, self.other_user, invitee, tok=self.other_user_tok + ) + + # verify that user is invited + channel = self.make_request( + "GET", + f"/_matrix/client/v3/rooms/{self.room_id}/members?membership=invite", + access_token=self.other_user_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(len(channel.json_body["chunk"]), 1) + invite = channel.json_body["chunk"][0] + self.assertEqual(invite["state_key"], invitee) + + # shutdown room + channel = self.make_request( + "DELETE", + self.url, + {"new_room_user_id": self.admin_user}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(len(channel.json_body["kicked_users"]), 2) + + # joined member is moved to new room but invited user is not + users_in_room = self.get_success( + self.store.get_users_in_room(channel.json_body["new_room_id"]) + ) + self.assertNotIn(invitee, users_in_room) + self.assertIn(self.other_user, users_in_room) + self._is_purged(self.room_id) + self._has_no_members(self.room_id) + def test_shutdown_room_consent(self) -> None: """Test that we can shutdown rooms with local users who have not yet accepted the privacy policy. This used to fail when we tried to @@ -758,6 +799,8 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): self.assertEqual(2, len(channel.json_body["results"])) self.assertEqual("complete", channel.json_body["results"][0]["status"]) self.assertEqual("complete", channel.json_body["results"][1]["status"]) + self.assertEqual(self.room_id, channel.json_body["results"][0]["room_id"]) + self.assertEqual(self.room_id, channel.json_body["results"][1]["room_id"]) delete_ids = {delete_id1, delete_id2} self.assertTrue(channel.json_body["results"][0]["delete_id"] in delete_ids) delete_ids.remove(channel.json_body["results"][0]["delete_id"]) @@ -777,6 +820,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): self.assertEqual(1, len(channel.json_body["results"])) self.assertEqual("complete", channel.json_body["results"][0]["status"]) self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"]) + self.assertEqual(self.room_id, channel.json_body["results"][0]["room_id"]) # get status after more than clearing time for all tasks self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2) @@ -1237,6 +1281,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): self.assertEqual( delete_id, channel_room_id.json_body["results"][0]["delete_id"] ) + self.assertEqual( + self.room_id, channel_room_id.json_body["results"][0]["room_id"] + ) # get information by delete_id channel_delete_id = self.make_request( @@ -1249,6 +1296,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase): channel_delete_id.code, msg=channel_delete_id.json_body, ) + self.assertEqual(self.room_id, channel_delete_id.json_body["room_id"]) # test values that are the same in both responses for content in [ @@ -1282,6 +1330,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") + 
@unittest.override_config({"room_list_publication_rules": [{"action": "allow"}]}) def test_list_rooms(self) -> None: """Test that we can list rooms""" # Create 3 test rooms @@ -1311,7 +1360,7 @@ class RoomTestCase(unittest.HomeserverTestCase): # Check that response json body contains a "rooms" key self.assertTrue( "rooms" in channel.json_body, - msg="Response body does not " "contain a 'rooms' key", + msg="Response body does not contain a 'rooms' key", ) # Check that 3 rooms were returned @@ -1795,6 +1844,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertEqual(room_id, channel.json_body["rooms"][0].get("room_id")) self.assertEqual("ж", channel.json_body["rooms"][0].get("name")) + @unittest.override_config({"room_list_publication_rules": [{"action": "allow"}]}) def test_filter_public_rooms(self) -> None: self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok, is_public=True @@ -1872,6 +1922,7 @@ class RoomTestCase(unittest.HomeserverTestCase): self.assertEqual(1, response.json_body["total_rooms"]) self.assertEqual(1, len(response.json_body["rooms"])) + @unittest.override_config({"room_list_publication_rules": [{"action": "allow"}]}) def test_single_room(self) -> None: """Test that a single room can be requested correctly""" # Create two test rooms @@ -2035,6 +2086,52 @@ class RoomTestCase(unittest.HomeserverTestCase): # the create_room already does the right thing, so no need to verify that we got # the state events it created. + def test_room_state_param(self) -> None: + """Test that filtering by state event type works when requesting state""" + room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{room_id}/state?type=m.room.member", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) + state = channel.json_body["state"] + # only one member has joined so there should be one membership event + self.assertEqual(1, len(state)) + event = state[0] + self.assertEqual(event["type"], "m.room.member") + self.assertEqual(event["state_key"], self.admin_user) + + def test_room_state_param_empty(self) -> None: + """Test that passing an empty string as state filter param returns no state events""" + room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{room_id}/state?type=", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) + state = channel.json_body["state"] + self.assertEqual(5, len(state)) + + def test_room_state_param_not_in_room(self) -> None: + """ + Test that passing a state filter param for a state event not in the room + returns no state events + """ + room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/rooms/{room_id}/state?type=m.room.custom", + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code) + state = channel.json_body["state"] + self.assertEqual(0, len(state)) + def _set_canonical_alias( self, room_id: str, test_alias: str, admin_user_tok: str ) -> None: @@ -3050,7 +3147,7 @@ PURGE_TABLES = [ "pusher_throttle", "room_account_data", "room_tags", - # "state_groups", # Current impl leaves orphaned state groups around. 
+ "state_groups", "state_groups_state", "federation_inbound_events_staging", ] diff --git a/tests/rest/admin/test_scheduled_tasks.py b/tests/rest/admin/test_scheduled_tasks.py new file mode 100644
index 0000000000..9654e9322b --- /dev/null +++ b/tests/rest/admin/test_scheduled_tasks.py
@@ -0,0 +1,192 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# +# +from typing import Mapping, Optional, Tuple + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client import login +from synapse.server import HomeServer +from synapse.types import JsonMapping, ScheduledTask, TaskStatus +from synapse.util import Clock + +from tests import unittest + + +class ScheduledTasksAdminApiTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + self._task_scheduler = hs.get_task_scheduler() + + # create and schedule a few tasks + async def _test_task( + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + return TaskStatus.ACTIVE, None, None + + async def _finished_test_task( + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + return TaskStatus.COMPLETE, None, None + + async def _failed_test_task( + task: ScheduledTask, + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + return TaskStatus.FAILED, None, "Everything failed" + + self._task_scheduler.register_action(_test_task, "test_task") + self.get_success( + self._task_scheduler.schedule_task("test_task", resource_id="test") + ) + + self._task_scheduler.register_action(_finished_test_task, "finished_test_task") + self.get_success( + self._task_scheduler.schedule_task( + "finished_test_task", resource_id="finished_task" + ) + ) + + self._task_scheduler.register_action(_failed_test_task, "failed_test_task") + self.get_success( + self._task_scheduler.schedule_task( + "failed_test_task", resource_id="failed_task" + ) + ) + + def check_scheduled_tasks_response(self, scheduled_tasks: Mapping) -> list: + result = [] + for task in scheduled_tasks: + if task["resource_id"] == "test": + self.assertEqual(task["status"], TaskStatus.ACTIVE) + self.assertEqual(task["action"], "test_task") + result.append(task) + if task["resource_id"] == "finished_task": + self.assertEqual(task["status"], TaskStatus.COMPLETE) + self.assertEqual(task["action"], "finished_test_task") + result.append(task) + if task["resource_id"] == "failed_task": + self.assertEqual(task["status"], TaskStatus.FAILED) + self.assertEqual(task["action"], "failed_test_task") + result.append(task) + + return result + + def test_requester_is_not_admin(self) -> None: + """ + If the user is not a server admin, an error 403 is returned. 
+ """ + + self.register_user("user", "pass", admin=False) + other_user_tok = self.login("user", "pass") + + channel = self.make_request( + "GET", + "/_synapse/admin/v1/scheduled_tasks", + content={}, + access_token=other_user_tok, + ) + + self.assertEqual(403, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_scheduled_tasks(self) -> None: + """ + Test that endpoint returns scheduled tasks. + """ + + channel = self.make_request( + "GET", + "/_synapse/admin/v1/scheduled_tasks", + content={}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + scheduled_tasks = channel.json_body["scheduled_tasks"] + + # make sure we got back all the scheduled tasks + found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) + self.assertEqual(len(found_tasks), 3) + + def test_filtering_scheduled_tasks(self) -> None: + """ + Test that filtering the scheduled tasks response via query params works as expected. + """ + # filter via job_status + channel = self.make_request( + "GET", + "/_synapse/admin/v1/scheduled_tasks?job_status=active", + content={}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + scheduled_tasks = channel.json_body["scheduled_tasks"] + found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) + + # only the active task should have been returned + self.assertEqual(len(found_tasks), 1) + self.assertEqual(found_tasks[0]["status"], "active") + + # filter via action_name + channel = self.make_request( + "GET", + "/_synapse/admin/v1/scheduled_tasks?action_name=test_task", + content={}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + scheduled_tasks = channel.json_body["scheduled_tasks"] + + # only test_task should have been returned + found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) + self.assertEqual(len(found_tasks), 1) + self.assertEqual(found_tasks[0]["action"], "test_task") + + # filter via max_timestamp + channel = self.make_request( + "GET", + "/_synapse/admin/v1/scheduled_tasks?max_timestamp=0", + content={}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + scheduled_tasks = channel.json_body["scheduled_tasks"] + found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) + + # none should have been returned + self.assertEqual(len(found_tasks), 0) + + # filter via resource id + channel = self.make_request( + "GET", + "/_synapse/admin/v1/scheduled_tasks?resource_id=failed_task", + content={}, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + scheduled_tasks = channel.json_body["scheduled_tasks"] + found_tasks = self.check_scheduled_tasks_response(scheduled_tasks) + + # only the task with the matching resource id should have been returned + self.assertEqual(len(found_tasks), 1) + self.assertEqual(found_tasks[0]["resource_id"], "failed_task") diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index 2a1e42bbc8..150caeeee2 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py
@@ -531,9 +531,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): # simulate a change in server config after a server restart. new_display_name = "new display name" - self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = ( - new_display_name - ) + self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = new_display_name self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all() self.make_request( @@ -577,9 +575,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): # simulate a change in server config after a server restart. new_avatar_url = "test/new-url" - self.server_notices_manager._config.servernotices.server_notices_mxid_avatar_url = ( - new_avatar_url - ) + self.server_notices_manager._config.servernotices.server_notices_mxid_avatar_url = new_avatar_url self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all() self.make_request( @@ -692,9 +688,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): # simulate a change in server config after a server restart. new_avatar_url = "test/new-url" - self.server_notices_manager._config.servernotices.server_notices_room_avatar_url = ( - new_avatar_url - ) + self.server_notices_manager._config.servernotices.server_notices_room_avatar_url = new_avatar_url self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all() self.make_request( diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py
index 5f60e19e56..07ec49c4e5 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py
@@ -82,7 +82,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase): """ If parameters are invalid, an error is returned. """ - # unkown order_by + # unknown order_by channel = self.make_request( "GET", self.url + "?order_by=bar", diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 16bb4349f5..412718f06c 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py
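# The shared-secret registration exercised in the hunks below computes an
# HMAC-SHA1 over nul-separated fields; a self-contained sketch, assuming the
# test server's registration shared secret is b"shared" and mirroring the
# nonce_mac helper added in test_extra_user_type:
import hashlib
import hmac

def registration_mac(
    nonce: str, username: str, password: str, admin: bool, user_type: str
) -> str:
    mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)
    fields = [
        nonce.encode("ascii"),
        username.encode("ascii"),
        password.encode("ascii"),
        b"admin" if admin else b"notadmin",
        user_type.encode("ascii"),
    ]
    mac.update(b"\x00".join(fields))
    return mac.hexdigest()

# e.g. registration_mac(nonce, "alice", "abc123", False, "extra1") reproduces the
# want_mac that the test posts back to the registration endpoint.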
@@ -21,9 +21,12 @@ import hashlib import hmac +import json import os +import time import urllib.parse from binascii import unhexlify +from http import HTTPStatus from typing import Dict, List, Optional from unittest.mock import AsyncMock, Mock, patch @@ -33,7 +36,13 @@ from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource import synapse.rest.admin -from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes +from synapse.api.constants import ( + ApprovalNoticeMedium, + EventContentFields, + EventTypes, + LoginType, + UserTypes, +) from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions from synapse.media.filepath import MediaFilePaths @@ -42,6 +51,7 @@ from synapse.rest.client import ( devices, login, logout, + media, profile, register, room, @@ -54,7 +64,9 @@ from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from tests import unittest +from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.test_utils import SMALL_PNG +from tests.test_utils.event_injection import inject_event from tests.unittest import override_config @@ -316,6 +328,61 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Invalid user type", channel.json_body["error"]) + @override_config( + { + "user_types": { + "extra_user_types": ["extra1", "extra2"], + } + } + ) + def test_extra_user_type(self) -> None: + """ + Check that the extra user type can be used when registering a user. + """ + + def nonce_mac(user_type: str) -> tuple[str, str]: + """ + Get a nonce and the expected HMAC for that nonce. + """ + channel = self.make_request("GET", self.url) + nonce = channel.json_body["nonce"] + + want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) + want_mac.update( + nonce.encode("ascii") + + b"\x00alice\x00abc123\x00notadmin\x00" + + user_type.encode("ascii") + ) + want_mac_str = want_mac.hexdigest() + + return nonce, want_mac_str + + nonce, mac = nonce_mac("extra1") + # Valid user_type + body = { + "nonce": nonce, + "username": "alice", + "password": "abc123", + "user_type": "extra1", + "mac": mac, + } + channel = self.make_request("POST", self.url, body) + self.assertEqual(200, channel.code, msg=channel.json_body) + + nonce, mac = nonce_mac("extra3") + # Invalid user_type + body = { + "nonce": nonce, + "username": "alice", + "password": "abc123", + "user_type": "extra3", + "mac": mac, + } + channel = self.make_request("POST", self.url, body) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Invalid user type", channel.json_body["error"]) + def test_displayname(self) -> None: """ Test that displayname of new user is set @@ -715,7 +782,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - # unkown order_by + # unknown order_by channel = self.make_request( "GET", self.url + "?order_by=bar", @@ -1174,6 +1241,80 @@ class UsersListTestCase(unittest.HomeserverTestCase): not_user_types=["custom"], ) + @override_config( + { + "user_types": { + "extra_user_types": ["extra1", "extra2"], + } + } + ) + def test_filter_not_user_types_with_extra(self) -> None: + """Tests that the endpoint handles the not_user_types param when extra_user_types are configured""" + + regular_user_id = 
self.register_user("normalo", "secret") + + extra1_user_id = self.register_user("extra1", "secret") + self.make_request( + "PUT", + "/_synapse/admin/v2/users/" + urllib.parse.quote(extra1_user_id), + {"user_type": "extra1"}, + access_token=self.admin_user_tok, + ) + + def test_user_type( + expected_user_ids: List[str], not_user_types: Optional[List[str]] = None + ) -> None: + """Runs a test for the not_user_types param + Args: + expected_user_ids: Ids of the users that are expected to be returned + not_user_types: List of values for the not_user_types param + """ + + user_type_query = "" + + if not_user_types is not None: + user_type_query = "&".join( + [f"not_user_type={u}" for u in not_user_types] + ) + + test_url = f"{self.url}?{user_type_query}" + channel = self.make_request( + "GET", + test_url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code) + self.assertEqual(channel.json_body["total"], len(expected_user_ids)) + self.assertEqual( + expected_user_ids, + [u["name"] for u in channel.json_body["users"]], + ) + + # Request without user_types → all users expected + test_user_type([self.admin_user, extra1_user_id, regular_user_id]) + + # Request and exclude extra1 user type + test_user_type( + [self.admin_user, regular_user_id], + not_user_types=["extra1"], + ) + + # Request and exclude extra1 and extra2 user types + test_user_type( + [self.admin_user, regular_user_id], + not_user_types=["extra1", "extra2"], + ) + + # Request and exclude empty user types → only expected the extra1 user + test_user_type([extra1_user_id], not_user_types=[""]) + + # Request and exclude an unregistered type → expect all users + test_user_type( + [self.admin_user, extra1_user_id, regular_user_id], + not_user_types=["extra3"], + ) + def test_erasure_status(self) -> None: # Create a new user. 
user_id = self.register_user("eraseme", "eraseme") @@ -1411,9 +1552,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): UserID.from_string("@user:test"), "mxc://servername/mediaid" ) ) - self.get_success( - self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0) - ) def test_no_auth(self) -> None: """ @@ -1500,7 +1638,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(False, channel.json_body["deactivated"]) - self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"]) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User1", channel.json_body["displayname"]) self.assertFalse(channel.json_body["erased"]) @@ -1525,7 +1662,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(True, channel.json_body["deactivated"]) - self.assertEqual(0, len(channel.json_body["threepids"])) self.assertIsNone(channel.json_body["avatar_url"]) self.assertIsNone(channel.json_body["displayname"]) self.assertTrue(channel.json_body["erased"]) @@ -1568,7 +1704,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(False, channel.json_body["deactivated"]) - self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"]) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User1", channel.json_body["displayname"]) @@ -1592,7 +1727,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(True, channel.json_body["deactivated"]) - self.assertEqual(0, len(channel.json_body["threepids"])) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User1", channel.json_body["displayname"]) @@ -1622,7 +1756,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(False, channel.json_body["deactivated"]) - self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"]) self.assertIsNone(channel.json_body["avatar_url"]) self.assertIsNone(channel.json_body["displayname"]) @@ -1646,7 +1779,6 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(True, channel.json_body["deactivated"]) - self.assertEqual(0, len(channel.json_body["threepids"])) self.assertIsNone(channel.json_body["avatar_url"]) self.assertIsNone(channel.json_body["displayname"]) @@ -1817,25 +1949,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) - # threepids not valid - channel = self.make_request( - "PUT", - self.url_other_user, - access_token=self.admin_user_tok, - content={"threepids": {"medium": "email", "wrong_address": "id"}}, - ) - self.assertEqual(400, channel.code, 
msg=channel.json_body) - self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) - - channel = self.make_request( - "PUT", - self.url_other_user, - access_token=self.admin_user_tok, - content={"threepids": {"address": "value"}}, - ) - self.assertEqual(400, channel.code, msg=channel.json_body) - self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) - def test_get_user(self) -> None: """ Test a simple get of a user. @@ -1890,8 +2003,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) self.assertTrue(channel.json_body["admin"]) self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"]) self._check_fields(channel.json_body) @@ -1906,8 +2017,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) self.assertTrue(channel.json_body["admin"]) self.assertFalse(channel.json_body["is_guest"]) self.assertFalse(channel.json_body["deactivated"]) @@ -1945,9 +2054,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(201, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) - self.assertEqual(1, len(channel.json_body["threepids"])) self.assertEqual( "external_id1", channel.json_body["external_ids"][0]["external_id"] ) @@ -1969,8 +2075,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) self.assertFalse(channel.json_body["admin"]) self.assertFalse(channel.json_body["is_guest"]) self.assertFalse(channel.json_body["deactivated"]) @@ -2062,123 +2166,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("@bob:test", channel.json_body["name"]) self.assertFalse(channel.json_body["admin"]) - @override_config( - { - "email": { - "enable_notifs": True, - "notif_for_new_users": True, - "notif_from": "test@example.com", - }, - "public_baseurl": "https://example.com", - } - ) - def test_create_user_email_notif_for_new_users(self) -> None: - """ - Check that a new regular user is created successfully and - got an email pusher. - """ - url = self.url_prefix % "@bob:test" - - # Create user - body = { - "password": "abc123", - # Note that the given email is not in canonical form. 
- "threepids": [{"medium": "email", "address": "Bob@bob.bob"}], - } - - channel = self.make_request( - "PUT", - url, - access_token=self.admin_user_tok, - content=body, - ) - - self.assertEqual(201, channel.code, msg=channel.json_body) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) - - pushers = list( - self.get_success(self.store.get_pushers_by({"user_name": "@bob:test"})) - ) - self.assertEqual(len(pushers), 1) - self.assertEqual("@bob:test", pushers[0].user_name) - - @override_config( - { - "email": { - "enable_notifs": False, - "notif_for_new_users": False, - "notif_from": "test@example.com", - }, - "public_baseurl": "https://example.com", - } - ) - def test_create_user_email_no_notif_for_new_users(self) -> None: - """ - Check that a new regular user is created successfully and - got not an email pusher. - """ - url = self.url_prefix % "@bob:test" - - # Create user - body = { - "password": "abc123", - "threepids": [{"medium": "email", "address": "bob@bob.bob"}], - } - - channel = self.make_request( - "PUT", - url, - access_token=self.admin_user_tok, - content=body, - ) - - self.assertEqual(201, channel.code, msg=channel.json_body) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) - - pushers = list( - self.get_success(self.store.get_pushers_by({"user_name": "@bob:test"})) - ) - self.assertEqual(len(pushers), 0) - - @override_config( - { - "email": { - "enable_notifs": True, - "notif_for_new_users": True, - "notif_from": "test@example.com", - }, - "public_baseurl": "https://example.com", - } - ) - def test_create_user_email_notif_for_new_users_with_msisdn_threepid(self) -> None: - """ - Check that a new regular user is created successfully when they have a msisdn - threepid and email notif_for_new_users is set to True. - """ - url = self.url_prefix % "@bob:test" - - # Create user - body = { - "password": "abc123", - "threepids": [{"medium": "msisdn", "address": "1234567890"}], - } - - channel = self.make_request( - "PUT", - url, - access_token=self.admin_user_tok, - content=body, - ) - - self.assertEqual(201, channel.code, msg=channel.json_body) - self.assertEqual("@bob:test", channel.json_body["name"]) - self.assertEqual("msisdn", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("1234567890", channel.json_body["threepids"][0]["address"]) - def test_set_password(self) -> None: """ Test setting a new password for another user. @@ -2222,89 +2209,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual("foobar", channel.json_body["displayname"]) - - def test_set_threepid(self) -> None: - """ - Test setting threepid for an other user. 
- """ - - # Add two threepids to user - channel = self.make_request( - "PUT", - self.url_other_user, - access_token=self.admin_user_tok, - content={ - "threepids": [ - {"medium": "email", "address": "bob1@bob.bob"}, - {"medium": "email", "address": "bob2@bob.bob"}, - ], - }, - ) - - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(2, len(channel.json_body["threepids"])) - # result does not always have the same sort order, therefore it becomes sorted - sorted_result = sorted( - channel.json_body["threepids"], key=lambda k: k["address"] - ) - self.assertEqual("email", sorted_result[0]["medium"]) - self.assertEqual("bob1@bob.bob", sorted_result[0]["address"]) - self.assertEqual("email", sorted_result[1]["medium"]) - self.assertEqual("bob2@bob.bob", sorted_result[1]["address"]) - self._check_fields(channel.json_body) - - # Set a new and remove a threepid - channel = self.make_request( - "PUT", - self.url_other_user, - access_token=self.admin_user_tok, - content={ - "threepids": [ - {"medium": "email", "address": "bob2@bob.bob"}, - {"medium": "email", "address": "bob3@bob.bob"}, - ], - }, - ) - - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(2, len(channel.json_body["threepids"])) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob2@bob.bob", channel.json_body["threepids"][0]["address"]) - self.assertEqual("email", channel.json_body["threepids"][1]["medium"]) - self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][1]["address"]) - self._check_fields(channel.json_body) - - # Get user - channel = self.make_request( - "GET", - self.url_other_user, - access_token=self.admin_user_tok, - ) - - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(2, len(channel.json_body["threepids"])) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob2@bob.bob", channel.json_body["threepids"][0]["address"]) - self.assertEqual("email", channel.json_body["threepids"][1]["medium"]) - self.assertEqual("bob3@bob.bob", channel.json_body["threepids"][1]["address"]) - self._check_fields(channel.json_body) - - # Remove threepids - channel = self.make_request( - "PUT", - self.url_other_user, - access_token=self.admin_user_tok, - content={"threepids": []}, - ) - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(0, len(channel.json_body["threepids"])) - self._check_fields(channel.json_body) - - def test_set_duplicate_threepid(self) -> None: """ Test setting the same threepid for a second user. First user loses and second user gets mapping of this threepid. 
@@ -2328,9 +2232,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(first_user, channel.json_body["name"]) - self.assertEqual(1, len(channel.json_body["threepids"])) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob1@bob.bob", channel.json_body["threepids"][0]["address"]) self._check_fields(channel.json_body) # Add threepids to other user @@ -2347,9 +2248,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(1, len(channel.json_body["threepids"])) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual("bob2@bob.bob", channel.json_body["threepids"][0]["address"]) self._check_fields(channel.json_body) # Add two new threepids to other user @@ -2369,15 +2267,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): # other user has this two threepids self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(2, len(channel.json_body["threepids"])) - # result does not always have the same sort order, therefore it becomes sorted - sorted_result = sorted( - channel.json_body["threepids"], key=lambda k: k["address"] - ) - self.assertEqual("email", sorted_result[0]["medium"]) - self.assertEqual("bob1@bob.bob", sorted_result[0]["address"]) - self.assertEqual("email", sorted_result[1]["medium"]) - self.assertEqual("bob3@bob.bob", sorted_result[1]["address"]) self._check_fields(channel.json_body) # first_user has no threepid anymore @@ -2388,7 +2277,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(first_user, channel.json_body["name"]) - self.assertEqual(0, len(channel.json_body["threepids"])) self._check_fields(channel.json_body) def test_set_external_id(self) -> None: @@ -2623,9 +2511,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): UserID.from_string("@user:test"), "mxc://servername/mediaid" ) ) - self.get_success( - self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0) - ) # Get user channel = self.make_request( @@ -2637,7 +2522,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertFalse(channel.json_body["deactivated"]) - self.assertEqual("foo@bar.com", channel.json_body["threepids"][0]["address"]) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User", channel.json_body["displayname"]) @@ -2652,7 +2536,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["deactivated"]) - self.assertEqual(0, len(channel.json_body["threepids"])) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User", channel.json_body["displayname"]) @@ -2671,7 +2554,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["deactivated"]) - self.assertEqual(0, len(channel.json_body["threepids"])) 
self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User", channel.json_body["displayname"]) @@ -2965,22 +2847,18 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["admin"]) - def test_set_user_type(self) -> None: - """ - Test changing user type. - """ - - # Set to support type + def set_user_type(self, user_type: Optional[str]) -> None: + # Set to user_type channel = self.make_request( "PUT", self.url_other_user, access_token=self.admin_user_tok, - content={"user_type": UserTypes.SUPPORT}, + content={"user_type": user_type}, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(UserTypes.SUPPORT, channel.json_body["user_type"]) + self.assertEqual(user_type, channel.json_body["user_type"]) # Get user channel = self.make_request( @@ -2991,30 +2869,44 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) - self.assertEqual(UserTypes.SUPPORT, channel.json_body["user_type"]) + self.assertEqual(user_type, channel.json_body["user_type"]) + + def test_set_user_type(self) -> None: + """ + Test changing user type. + """ + + # Set to support type + self.set_user_type(UserTypes.SUPPORT) # Change back to a regular user - channel = self.make_request( - "PUT", - self.url_other_user, - access_token=self.admin_user_tok, - content={"user_type": None}, - ) + self.set_user_type(None) - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual("@user:test", channel.json_body["name"]) - self.assertIsNone(channel.json_body["user_type"]) + @override_config({"user_types": {"extra_user_types": ["extra1", "extra2"]}}) + def test_set_user_type_with_extras(self) -> None: + """ + Test changing user type with extra_user_types configured. + """ - # Get user + # Check that we can still set to support type + self.set_user_type(UserTypes.SUPPORT) + + # Check that we can set to an extra user type + self.set_user_type("extra2") + + # Change back to a regular user + self.set_user_type(None) + + # Try setting to invalid type channel = self.make_request( - "GET", + "PUT", self.url_other_user, access_token=self.admin_user_tok, + content={"user_type": "extra3"}, ) - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual("@user:test", channel.json_body["name"]) - self.assertIsNone(channel.json_body["user_type"]) + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Invalid user type", channel.json_body["error"]) def test_accidental_deactivation_prevention(self) -> None: """ @@ -3204,7 +3096,6 @@ class UserRestTestCase(unittest.HomeserverTestCase): content: Content dictionary to check """ self.assertIn("displayname", content) - self.assertIn("threepids", content) self.assertIn("avatar_url", content) self.assertIn("admin", content) self.assertIn("deactivated", content) @@ -3217,6 +3108,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): self.assertIn("consent_ts", content) self.assertIn("external_ids", content) self.assertIn("last_seen_ts", content) + self.assertIn("suspended", content) # This key was removed intentionally. Ensure it is not accidentally re-included. 
self.assertNotIn("password_hash", content) @@ -3513,6 +3405,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, + media.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -3692,7 +3585,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): @parameterized.expand(["GET", "DELETE"]) def test_invalid_parameter(self, method: str) -> None: """If parameters are invalid, an error is returned.""" - # unkown order_by + # unknown order_by channel = self.make_request( method, self.url + "?order_by=bar", @@ -3887,9 +3780,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): image_data1 = SMALL_PNG # Resolution: 1×1, MIME type: image/gif, Extension: gif, Size: 35 B image_data2 = unhexlify( - b"47494638376101000100800100000000" - b"ffffff2c00000000010001000002024c" - b"01003b" + b"47494638376101000100800100000000ffffff2c00000000010001000002024c01003b" ) # Resolution: 1×1, MIME type: image/bmp, Extension: bmp, Size: 54 B image_data3 = unhexlify( @@ -4019,7 +3910,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): # Try to access a media and to create `last_access_ts` channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_and_media_id}", + f"/_matrix/client/v1/media/download/{server_and_media_id}", shorthand=False, access_token=user_token, ) @@ -4858,100 +4749,6 @@ class UsersByExternalIdTestCase(unittest.HomeserverTestCase): ) -class UsersByThreePidTestCase(unittest.HomeserverTestCase): - servlets = [ - synapse.rest.admin.register_servlets, - login.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - - self.admin_user = self.register_user("admin", "pass", admin=True) - self.admin_user_tok = self.login("admin", "pass") - - self.other_user = self.register_user("user", "pass") - self.get_success( - self.store.user_add_threepid( - self.other_user, "email", "user@email.com", 1, 1 - ) - ) - self.get_success( - self.store.user_add_threepid(self.other_user, "msidn", "+1-12345678", 1, 1) - ) - - def test_no_auth(self) -> None: - """Try to look up a user without authentication.""" - url = "/_synapse/admin/v1/threepid/email/users/user%40email.com" - - channel = self.make_request( - "GET", - url, - ) - - self.assertEqual(401, channel.code, msg=channel.json_body) - self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - - def test_medium_does_not_exist(self) -> None: - """Tests that both a lookup for a medium that does not exist and a user that - doesn't exist with that third party ID returns a 404""" - # test for unknown medium - url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" - - channel = self.make_request( - "GET", - url, - access_token=self.admin_user_tok, - ) - - self.assertEqual(404, channel.code, msg=channel.json_body) - self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - - # test for unknown user with a known medium - url = "/_synapse/admin/v1/threepid/email/users/unknown" - - channel = self.make_request( - "GET", - url, - access_token=self.admin_user_tok, - ) - - self.assertEqual(404, channel.code, msg=channel.json_body) - self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - - def test_success(self) -> None: - """Tests a successful medium + address lookup""" - # test for email medium with encoded value of user@email.com - url = 
"/_synapse/admin/v1/threepid/email/users/user%40email.com" - - channel = self.make_request( - "GET", - url, - access_token=self.admin_user_tok, - ) - - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual( - {"user_id": self.other_user}, - channel.json_body, - ) - - # test for msidn medium with encoded value of +1-12345678 - url = "/_synapse/admin/v1/threepid/msidn/users/%2B1-12345678" - - channel = self.make_request( - "GET", - url, - access_token=self.admin_user_tok, - ) - - self.assertEqual(200, channel.code, msg=channel.json_body) - self.assertEqual( - {"user_id": self.other_user}, - channel.json_body, - ) - - class AllowCrossSigningReplacementTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -5024,7 +4821,6 @@ class UserSuspensionTestCase(unittest.HomeserverTestCase): self.store = hs.get_datastores().main - @override_config({"experimental_features": {"msc3823_account_suspension": True}}) def test_suspend_user(self) -> None: # test that suspending user works channel = self.make_request( @@ -5089,3 +4885,766 @@ class UserSuspensionTestCase(unittest.HomeserverTestCase): res5 = self.get_success(self.store.get_user_suspended_status(self.bad_user)) self.assertEqual(True, res5) + + +class UserRedactionTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + admin.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin = self.register_user("thomas", "pass", True) + self.admin_tok = self.login("thomas", "pass") + + self.bad_user = self.register_user("teresa", "pass") + self.bad_user_tok = self.login("teresa", "pass") + + self.store = hs.get_datastores().main + + self.spam_checker = hs.get_module_api_callbacks().spam_checker + + # create rooms - room versions 11+ store the `redacts` key in content while + # earlier ones don't so we use a mix of room versions + self.rm1 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="7" + ) + self.rm2 = self.helper.create_room_as(self.admin, tok=self.admin_tok) + self.rm3 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="11" + ) + + def test_redact_messages_all_rooms(self) -> None: + """ + Test that request to redact events in all rooms user is member of is successful + """ + # join rooms, send some messages + originals = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + originals.append(join["event_id"]) + for i in range(15): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok, expect_code=200 + ) + originals.append(res["event_id"]) + + # redact all events in all rooms + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": []}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + matched = [] + for rm in [self.rm1, self.rm2, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + for event in channel.json_body["chunk"]: + for event_id in originals: + if ( + event["type"] == "m.room.redaction" + and event["redacts"] == event_id + ): + 
matched.append(event_id) + self.assertEqual(len(matched), len(originals)) + + def test_redact_messages_specific_rooms(self) -> None: + """ + Test that request to redact events in specified rooms user is member of is successful + """ + + originals = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + originals.append(join["event_id"]) + for i in range(15): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok + ) + originals.append(res["event_id"]) + + # redact messages in rooms 1 and 3 + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": [self.rm1, self.rm3]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + # messages in requested rooms are redacted + for rm in [self.rm1, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + matches = [] + for event in channel.json_body["chunk"]: + for event_id in originals: + if ( + event["type"] == "m.room.redaction" + and event["redacts"] == event_id + ): + matches.append((event_id, event)) + # we redacted 16 messages + self.assertEqual(len(matches), 16) + + channel = self.make_request( + "GET", f"rooms/{self.rm2}/messages?limit=50", access_token=self.admin_tok + ) + self.assertEqual(channel.code, 200) + + # messages in remaining room are not + for event in channel.json_body["chunk"]: + if event["type"] == "m.room.redaction": + self.fail("found redaction in room 2") + + def test_redact_status(self) -> None: + rm2_originals = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + if rm == self.rm2: + rm2_originals.append(join["event_id"]) + for i in range(5): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok + ) + if rm == self.rm2: + rm2_originals.append(res["event_id"]) + + # redact messages in rooms 1 and 3 + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": [self.rm1, self.rm3]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + id = channel.json_body.get("redact_id") + + channel2 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + self.assertEqual(channel2.code, 200) + self.assertEqual(channel2.json_body.get("status"), "complete") + self.assertEqual(channel2.json_body.get("failed_redactions"), {}) + + # mock that will cause persisting the redaction events to fail + async def check_event_for_spam(event: str) -> str: + return "spam" + + self.spam_checker.check_event_for_spam = check_event_for_spam # type: ignore + + channel3 = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": [self.rm2]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + id = channel3.json_body.get("redact_id") + + channel4 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + self.assertEqual(channel4.code, 200) + self.assertEqual(channel4.json_body.get("status"), "complete") + failed_redactions = 
channel4.json_body.get("failed_redactions") + assert failed_redactions is not None + matched = [] + for original in rm2_originals: + if failed_redactions.get(original) is not None: + matched.append(original) + self.assertEqual(len(matched), len(rm2_originals)) + + def test_admin_redact_works_if_user_kicked_or_banned(self) -> None: + originals1 = [] + originals2 = [] + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + if rm in [self.rm1, self.rm3]: + originals1.append(join["event_id"]) + else: + originals2.append(join["event_id"]) + for i in range(5): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok + ) + if rm in [self.rm1, self.rm3]: + originals1.append(res["event_id"]) + else: + originals2.append(res["event_id"]) + + # kick user from rooms 1 and 3 + for r in [self.rm1, self.rm3]: + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{r}/kick", + content={"reason": "being a bummer", "user_id": self.bad_user}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + # redact messages in room 1 and 3 + channel1 = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": [self.rm1, self.rm3]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel1.code, 200) + id = channel1.json_body.get("redact_id") + + # check that there were no failed redactions in room 1 and 3 + channel2 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + self.assertEqual(channel2.code, 200) + self.assertEqual(channel2.json_body.get("status"), "complete") + failed_redactions = channel2.json_body.get("failed_redactions") + self.assertEqual(failed_redactions, {}) + + # double check + for rm in [self.rm1, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel3 = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel3.code, 200) + + matches = [] + for event in channel3.json_body["chunk"]: + for event_id in originals1: + if ( + event["type"] == "m.room.redaction" + and event["redacts"] == event_id + ): + matches.append((event_id, event)) + # we redacted 6 messages + self.assertEqual(len(matches), 6) + + # ban user from room 2 + channel4 = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{self.rm2}/ban", + content={"reason": "being a bummer", "user_id": self.bad_user}, + access_token=self.admin_tok, + ) + self.assertEqual(channel4.code, HTTPStatus.OK, channel4.result) + + # make a request to ban all user's messages + channel5 = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": []}, + access_token=self.admin_tok, + ) + self.assertEqual(channel5.code, 200) + id2 = channel5.json_body.get("redact_id") + + # check that there were no failed redactions in room 2 + channel6 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id2}", + access_token=self.admin_tok, + ) + self.assertEqual(channel6.code, 200) + self.assertEqual(channel6.json_body.get("status"), "complete") + failed_redactions = channel6.json_body.get("failed_redactions") + self.assertEqual(failed_redactions, {}) + + # double check messages in room 2 were redacted + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel7 = self.make_request( + "GET", + 
f"rooms/{self.rm2}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel7.code, 200) + + matches = [] + for event in channel7.json_body["chunk"]: + for event_id in originals2: + if event["type"] == "m.room.redaction" and event["redacts"] == event_id: + matches.append((event_id, event)) + # we redacted 6 messages + self.assertEqual(len(matches), 6) + + def test_redactions_for_remote_user_succeed_with_admin_priv_in_room(self) -> None: + """ + Test that if the admin requester has privileges in a room, redaction requests + succeed for a remote user + """ + + # inject some messages from remote user and collect event ids + original_message_ids = [] + for i in range(5): + event = self.get_success( + inject_event( + self.hs, + room_id=self.rm1, + type="m.room.message", + sender="@remote:remote_server", + content={"msgtype": "m.text", "body": f"nefarious_chatter{i}"}, + ) + ) + original_message_ids.append(event.event_id) + + # send a request to redact a remote user's messages in a room. + # the server admin created this room and has admin privilege in room + channel = self.make_request( + "POST", + "/_synapse/admin/v1/user/@remote:remote_server/redact", + content={"rooms": [self.rm1]}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + id = channel.json_body.get("redact_id") + + # check that there were no failed redactions + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body.get("status"), "complete") + failed_redactions = channel.json_body.get("failed_redactions") + self.assertEqual(failed_redactions, {}) + + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{self.rm1}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + for event in channel.json_body["chunk"]: + for event_id in original_message_ids: + if event["type"] == "m.room.redaction" and event["redacts"] == event_id: + original_message_ids.remove(event_id) + break + # we originally sent 5 messages so 5 should be redacted + self.assertEqual(len(original_message_ids), 0) + + def test_redact_redacts_encrypted_messages(self) -> None: + """ + Test that user's encrypted messages are redacted + """ + encrypted_room = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="7" + ) + self.helper.send_state( + encrypted_room, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=self.admin_tok, + ) + # join room send some messages + originals = [] + join = self.helper.join(encrypted_room, self.bad_user, tok=self.bad_user_tok) + originals.append(join["event_id"]) + for _ in range(15): + res = self.helper.send_event( + encrypted_room, "m.room.encrypted", {}, tok=self.bad_user_tok + ) + originals.append(res["event_id"]) + + # redact user's events + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": []}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + matched = [] + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{encrypted_room}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + for event in channel.json_body["chunk"]: + for event_id in originals: + if 
event["type"] == "m.room.redaction" and event["redacts"] == event_id: + matched.append(event_id) + self.assertEqual(len(matched), len(originals)) + + +class UserRedactionBackgroundTaskTestCase(BaseMultiWorkerStreamTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + admin.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin = self.register_user("thomas", "pass", True) + self.admin_tok = self.login("thomas", "pass") + + self.bad_user = self.register_user("teresa", "pass") + self.bad_user_tok = self.login("teresa", "pass") + + # create rooms - room versions 11+ store the `redacts` key in content while + # earlier ones don't so we use a mix of room versions + self.rm1 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="7" + ) + self.rm2 = self.helper.create_room_as(self.admin, tok=self.admin_tok) + self.rm3 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="11" + ) + + @override_config({"run_background_tasks_on": "worker1"}) + def test_redact_messages_all_rooms(self) -> None: + """ + Test that redact task successfully runs when `run_background_tasks_on` is specified + """ + self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={ + "worker_name": "worker1", + "run_background_tasks_on": "worker1", + "redis": {"enabled": True}, + }, + ) + + # join rooms, send some messages + original_event_ids = set() + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + original_event_ids.add(join["event_id"]) + for i in range(15): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok, expect_code=200 + ) + original_event_ids.add(res["event_id"]) + + # redact all events in all rooms + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": []}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + id = channel.json_body.get("redact_id") + + timeout_s = 10 + start_time = time.time() + redact_result = "" + while redact_result != "complete": + if start_time + timeout_s < time.time(): + self.fail("Timed out waiting for redactions.") + + channel2 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + redact_result = channel2.json_body["status"] + if redact_result == "failed": + self.fail("Redaction task failed.") + + redaction_ids = set() + for rm in [self.rm1, self.rm2, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + for event in channel.json_body["chunk"]: + if event["type"] == "m.room.redaction": + redaction_ids.add(event["redacts"]) + + self.assertIncludes(redaction_ids, original_event_ids, exact=True) + + +class GetInvitesFromUserTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + admin.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin = self.register_user("thomas", "pass", True) + self.admin_tok = self.login("thomas", "pass") + + self.bad_user = 
self.register_user("teresa", "pass") + self.bad_user_tok = self.login("teresa", "pass") + + self.random_users = [] + for i in range(4): + self.random_users.append(self.register_user(f"user{i}", f"pass{i}")) + + self.room1 = self.helper.create_room_as(self.bad_user, tok=self.bad_user_tok) + self.room2 = self.helper.create_room_as(self.bad_user, tok=self.bad_user_tok) + self.room3 = self.helper.create_room_as(self.bad_user, tok=self.bad_user_tok) + + @unittest.override_config( + {"rc_invites": {"per_issuer": {"per_second": 1000, "burst_count": 1000}}} + ) + def test_get_user_invite_count_new_invites_test_case(self) -> None: + """ + Test that new invites that arrive after a provided timestamp are counted + """ + # grab a current timestamp + before_invites_sent_ts = self.hs.get_clock().time_msec() + + # bad user sends some invites + for room_id in [self.room1, self.room2]: + for user in self.random_users: + self.helper.invite(room_id, self.bad_user, user, tok=self.bad_user_tok) + + # fetch using timestamp, all should be returned + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/sent_invite_count?from_ts={before_invites_sent_ts}", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["invite_count"], 8) + + # send some more invites, they should show up in addition to original 8 using same timestamp + for user in self.random_users: + self.helper.invite( + self.room3, src=self.bad_user, targ=user, tok=self.bad_user_tok + ) + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/sent_invite_count?from_ts={before_invites_sent_ts}", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["invite_count"], 12) + + def test_get_user_invite_count_invites_before_ts_test_case(self) -> None: + """ + Test that invites sent before provided ts are not counted + """ + # bad user sends some invites + for room_id in [self.room1, self.room2]: + for user in self.random_users: + self.helper.invite(room_id, self.bad_user, user, tok=self.bad_user_tok) + + # add a msec between last invite and ts + after_invites_sent_ts = self.hs.get_clock().time_msec() + 1 + + # fetch invites with timestamp, none should be returned + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/sent_invite_count?from_ts={after_invites_sent_ts}", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["invite_count"], 0) + + def test_user_invite_count_kick_ban_not_counted(self) -> None: + """ + Test that kicks and bans are not counted in invite count + """ + to_kick_user_id = self.register_user("kick_me", "pass") + to_kick_tok = self.login("kick_me", "pass") + + self.helper.join(self.room1, to_kick_user_id, tok=to_kick_tok) + + # grab a current timestamp + before_invites_sent_ts = self.hs.get_clock().time_msec() + + # bad user sends some invites (8) + for room_id in [self.room1, self.room2]: + for user in self.random_users: + self.helper.invite( + room_id, src=self.bad_user, targ=user, tok=self.bad_user_tok + ) + + # fetch using timestamp, all invites sent should be counted + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/sent_invite_count?from_ts={before_invites_sent_ts}", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["invite_count"], 8) + + # send a kick and some bans and make sure these 
aren't counted against invite total + for user in self.random_users: + self.helper.ban( + self.room1, src=self.bad_user, targ=user, tok=self.bad_user_tok + ) + + channel = self.make_request( + "POST", + f"/_matrix/client/v3/rooms/{self.room1}/kick", + content={"user_id": to_kick_user_id}, + access_token=self.bad_user_tok, + ) + self.assertEqual(channel.code, 200) + + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/sent_invite_count?from_ts={before_invites_sent_ts}", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["invite_count"], 8) + + +class GetCumulativeJoinedRoomCountForUserTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + admin.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin = self.register_user("thomas", "pass", True) + self.admin_tok = self.login("thomas", "pass") + + self.bad_user = self.register_user("teresa", "pass") + self.bad_user_tok = self.login("teresa", "pass") + + def test_user_cumulative_joined_room_count(self) -> None: + """ + Tests proper count returned from /cumulative_joined_room_count endpoint + """ + # Create rooms and join, grab timestamp before room creation + before_room_creation_timestamp = self.hs.get_clock().time_msec() + + joined_rooms = [] + for _ in range(3): + room = self.helper.create_room_as(self.admin, tok=self.admin_tok) + self.helper.join( + room, user=self.bad_user, expect_code=200, tok=self.bad_user_tok + ) + joined_rooms.append(room) + + # get a timestamp after room creation and join, add a msec between last join and ts + after_room_creation = self.hs.get_clock().time_msec() + 1 + + # Get rooms using this timestamp, there should be none since all rooms were created and joined + # before provided timestamp + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/cumulative_joined_room_count?from_ts={int(after_room_creation)}", + access_token=self.admin_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual(0, channel.json_body["cumulative_joined_room_count"]) + + # fetch rooms with the older timestamp before they were created and joined, this should + # return the rooms + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/cumulative_joined_room_count?from_ts={int(before_room_creation_timestamp)}", + access_token=self.admin_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + len(joined_rooms), channel.json_body["cumulative_joined_room_count"] + ) + + def test_user_joined_room_count_includes_left_and_banned_rooms(self) -> None: + """ + Tests proper count returned from /joined_room_count endpoint when user has left + or been banned from joined rooms + """ + # Create rooms and join, grab timestamp before room creation + before_room_creation_timestamp = self.hs.get_clock().time_msec() + + joined_rooms = [] + for _ in range(3): + room = self.helper.create_room_as(self.admin, tok=self.admin_tok) + self.helper.join( + room, user=self.bad_user, expect_code=200, tok=self.bad_user_tok + ) + joined_rooms.append(room) + + # fetch rooms with the older timestamp before they were created and joined + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/cumulative_joined_room_count?from_ts={int(before_room_creation_timestamp)}", + 
access_token=self.admin_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + len(joined_rooms), channel.json_body["cumulative_joined_room_count"] + ) + + # have the user banned from/leave the joined rooms + self.helper.ban( + joined_rooms[0], + src=self.admin, + targ=self.bad_user, + expect_code=200, + tok=self.admin_tok, + ) + self.helper.change_membership( + joined_rooms[1], + src=self.bad_user, + targ=self.bad_user, + membership="leave", + expect_code=200, + tok=self.bad_user_tok, + ) + self.helper.ban( + joined_rooms[2], + src=self.admin, + targ=self.bad_user, + expect_code=200, + tok=self.admin_tok, + ) + + # fetch the joined room count again, the number should remain the same as the collected joined rooms + channel = self.make_request( + "GET", + f"/_synapse/admin/v1/users/{self.bad_user}/cumulative_joined_room_count?from_ts={int(before_room_creation_timestamp)}", + access_token=self.admin_tok, + ) + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + len(joined_rooms), channel.json_body["cumulative_joined_room_count"] + ) diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py
index 6863c32f7c..5b819103c2 100644
--- a/tests/rest/client/sliding_sync/test_connection_tracking.py
+++ b/tests/rest/client/sliding_sync/test_connection_tracking.py
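The hunk below, repeated across the sliding-sync test files in this patch, wraps each suite in the same `@parameterized_class` decorator so that every test runs once against the new sliding-sync tables and once against the fallback path. For reference, a minimal sketch of how the decorator (from the `parameterized` package) expands a class; the class and attribute names here are illustrative, not Synapse's:

```python
# Minimal sketch of `parameterized_class` from the `parameterized` package,
# as applied in the hunks below. `ExampleTestCase` and `use_new_tables` are
# illustrative names.
import unittest

from parameterized import parameterized_class


@parameterized_class(
    ("use_new_tables",),
    [
        (True,),
        (False,),
    ],
    # `class_name_func` receives the base class, the parameter index, and the
    # parameter dict, and returns the name of the generated class.
    class_name_func=lambda cls, num, params_dict: (
        f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}"
    ),
)
class ExampleTestCase(unittest.TestCase):
    use_new_tables: bool  # injected onto each generated subclass

    def test_flag_is_set(self) -> None:
        # Runs once as ExampleTestCase_new and once as ExampleTestCase_fallback;
        # the decorator strips the test methods from the undecorated base class.
        self.assertIn(self.use_new_tables, (True, False))
```

The `class_name_func` lambda is what produces the `_new`/`_fallback` suffixes that will appear in the generated test names.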
@@ -13,7 +13,7 @@
 #
 import logging
 
-from parameterized import parameterized
+from parameterized import parameterized, parameterized_class
 
 from twisted.test.proto_helpers import MemoryReactor
 
@@ -28,6 +28,20 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
 logger = logging.getLogger(__name__)
 
 
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
 class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
     """
     Test connection tracking in the Sliding Sync API.
@@ -44,6 +58,8 @@ class SlidingSyncConnectionTrackingTestCase(SlidingSyncBase):
         self.store = hs.get_datastores().main
         self.storage_controllers = hs.get_storage_controllers()
 
+        super().prepare(reactor, clock, hs)
+
     def test_rooms_required_state_incremental_sync_LIVE(self) -> None:
         """Test that we only get state updates in incremental sync for rooms
         we've already seen (LIVE).
diff --git a/tests/rest/client/sliding_sync/test_extension_account_data.py b/tests/rest/client/sliding_sync/test_extension_account_data.py
index 3482a5f887..799fbb1856 100644
--- a/tests/rest/client/sliding_sync/test_extension_account_data.py
+++ b/tests/rest/client/sliding_sync/test_extension_account_data.py
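Besides the `parameterized_class` wrapper, this file's hunks introduce a small `TagAction` enum and branch on it with `typing_extensions.assert_never` so the type checker enforces exhaustive handling. A self-contained sketch of that pattern; the handler function and its body are invented for illustration:

```python
# Sketch of the exhaustiveness pattern the hunks below adopt: every enum
# member must be handled, and `assert_never` makes mypy flag any member
# added later without a matching branch. `apply_tag_action` is illustrative.
import enum

from typing_extensions import assert_never


class TagAction(enum.Enum):
    ADD = enum.auto()
    REMOVE = enum.auto()


def apply_tag_action(tag_action: TagAction) -> str:
    if tag_action == TagAction.ADD:
        return "added m.favourite"
    elif tag_action == TagAction.REMOVE:
        return "removed m.favourite"
    else:
        # Unreachable at runtime (raises AssertionError if hit); at type-check
        # time this errors if a new TagAction member lacks a branch above.
        assert_never(tag_action)
```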
@@ -11,8 +11,12 @@ # See the GNU Affero General Public License for more details: # <https://www.gnu.org/licenses/agpl-3.0.html>. # +import enum import logging +from parameterized import parameterized, parameterized_class +from typing_extensions import assert_never + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +32,25 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +class TagAction(enum.Enum): + ADD = enum.auto() + REMOVE = enum.auto() + + +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): """Tests for the account_data sliding sync extension""" @@ -43,6 +66,8 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.account_data_handler = hs.get_account_data_handler() + super().prepare(reactor, clock, hs) + def test_no_data_initial_sync(self) -> None: """ Test that enabling the account_data extension works during an intitial sync, @@ -62,18 +87,23 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): } response_body, _ = self.do_sync(sync_body, tok=user1_tok) + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), # Even though we don't have any global account data set, Synapse saves some # default push rules for us. {AccountDataTypes.PUSH_RULES}, exact=True, ) + # Push rules are a giant chunk of JSON data so we will just assume the value is correct if they key is here. + # global_account_data_map[AccountDataTypes.PUSH_RULES] + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -103,16 +133,19 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): # There has been no account data changes since the `from_token` so we shouldn't # see any account data here. 
+ global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), set(), exact=True, ) + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -147,16 +180,24 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # It should show us all of the global account data + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), {AccountDataTypes.PUSH_RULES, "org.matrix.foobarbaz"}, exact=True, ) + # Push rules are a giant chunk of JSON data so we will just assume the value is correct if they key is here. + # global_account_data_map[AccountDataTypes.PUSH_RULES] + self.assertEqual( + global_account_data_map["org.matrix.foobarbaz"], {"foo": "bar"} + ) + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -202,17 +243,23 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): # Make an incremental Sliding Sync request with the account_data extension enabled response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + global_account_data_map = { + global_event["type"]: global_event["content"] + for global_event in response_body["extensions"]["account_data"].get( + "global" + ) + } self.assertIncludes( - { - global_event["type"] - for global_event in response_body["extensions"]["account_data"].get( - "global" - ) - }, + global_account_data_map.keys(), # We should only see the new global account data that happened after the `from_token` {"org.matrix.doodardaz"}, exact=True, ) + self.assertEqual( + global_account_data_map["org.matrix.doodardaz"], {"doo": "dar"} + ) + + # No room account data for this test self.assertIncludes( response_body["extensions"]["account_data"].get("rooms").keys(), set(), @@ -237,6 +284,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) # Create another room with some room account data room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) @@ -248,6 +304,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) # Make an initial Sliding Sync request with the account_data extension enabled sync_body = { @@ -276,21 +341,36 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): {room_id1}, exact=True, ) + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + } self.assertIncludes( - { - 
event["type"] - for event in response_body["extensions"]["account_data"] - .get("rooms") - .get(room_id1) - }, - {"org.matrix.roorarraz"}, + account_data_map.keys(), + {"org.matrix.roorarraz", AccountDataTypes.TAG}, exact=True, ) + self.assertEqual(account_data_map["org.matrix.roorarraz"], {"roo": "rar"}) + self.assertEqual( + account_data_map[AccountDataTypes.TAG], {"tags": {"m.favourite": {}}} + ) - def test_room_account_data_incremental_sync(self) -> None: + @parameterized.expand( + [ + ("add tags", TagAction.ADD), + ("remove tags", TagAction.REMOVE), + ] + ) + def test_room_account_data_incremental_sync( + self, test_description: str, tag_action: TagAction + ) -> None: """ On incremental sync, we return all account data for a given room but only for rooms that we request and are being returned in the Sliding Sync response. + + (HaveSentRoomFlag.LIVE) """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -305,6 +385,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) # Create another room with some room account data room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) @@ -316,6 +405,15 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) sync_body = { "lists": {}, @@ -351,6 +449,42 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): content={"roo": "rar"}, ) ) + if tag_action == TagAction.ADD: + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) + ) + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) + ) + elif tag_action == TagAction.REMOVE: + # Remove the room tag + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + ) + ) + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + ) + ) + else: + assert_never(tag_action) # Make an incremental Sliding Sync request with the account_data extension enabled response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) @@ -365,17 +499,444 @@ class SlidingSyncAccountDataExtensionTestCase(SlidingSyncBase): exact=True, ) # We should only see the new room account data that happened after the `from_token` + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id1) + } self.assertIncludes( + account_data_map.keys(), + {"org.matrix.roorarraz2", AccountDataTypes.TAG}, + exact=True, + ) + self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"}) + if tag_action == TagAction.ADD: + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + ) + elif tag_action == TagAction.REMOVE: + # If we previously showed the client that the room has tags, when it no + # longer has 
tags, we need to show them an empty map. + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {}}, + ) + else: + assert_never(tag_action) + + @parameterized.expand( + [ + ("add tags", TagAction.ADD), + ("remove tags", TagAction.REMOVE), + ] + ) + def test_room_account_data_incremental_sync_out_of_range_never( + self, test_description: str, tag_action: TagAction + ) -> None: + """Tests that we don't return account data for rooms that are out of + range, but then do send all account data once they're in range. + + (initial/HaveSentRoomFlag.NEVER) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) + + # Now send a message into room1 so that it is at the top of the list + self.helper.send(room_id1, body="new event", tok=user1_tok) + + # Make a SS request for only the top room. + sync_body = { + "lists": { + "main": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 0, + } + }, + "extensions": { + "account_data": { + "enabled": True, + "lists": ["main"], + } + }, + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Only room1 should be in the response since it's the latest room with activity + # and our range only includes 1 room. 
+ self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + + # Add some other room account data + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + if tag_action == TagAction.ADD: + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) + ) + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) + ) + elif tag_action == TagAction.REMOVE: + # Remove the room tag + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + ) + ) + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + ) + ) + else: + assert_never(tag_action) + + # Move room2 into range. + self.helper.send(room_id2, body="new event", tok=user1_tok) + + # Make an incremental Sliding Sync request with the account_data extension enabled + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # We expect to see the account data of room2, as that has the most + # recent update. + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + # Since this is the first time we're seeing room2 down sync, we should see all + # room account data for it. + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id2) + } + expected_account_data_keys = { + "org.matrix.roorarraz", + "org.matrix.roorarraz2", + } + if tag_action == TagAction.ADD: + expected_account_data_keys.add(AccountDataTypes.TAG) + self.assertIncludes( + account_data_map.keys(), + expected_account_data_keys, + exact=True, + ) + self.assertEqual(account_data_map["org.matrix.roorarraz"], {"roo": "rar"}) + self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"}) + if tag_action == TagAction.ADD: + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + ) + elif tag_action == TagAction.REMOVE: + # Since we never told the client about the room tags, we don't need to say + # anything if there are no tags now (the client doesn't need an update). + self.assertIsNone( + account_data_map.get(AccountDataTypes.TAG), + account_data_map, + ) + else: + assert_never(tag_action) + + @parameterized.expand( + [ + ("add tags", TagAction.ADD), + ("remove tags", TagAction.REMOVE), + ] + ) + def test_room_account_data_incremental_sync_out_of_range_previously( + self, test_description: str, tag_action: TagAction + ) -> None: + """Tests that we don't return account data for rooms that fall out of + range, but then do send all account data that has changed they're back in range. 
+ + (HaveSentRoomFlag.PREVIOUSLY) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room and add some room account data + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + content={}, + ) + ) + + # Create another room with some room account data + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz", + content={"roo": "rar"}, + ) + ) + # Add a room tag to mark the room as a favourite + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + content={}, + ) + ) + + # Make an initial Sliding Sync request for only room1 and room2. + sync_body = { + "lists": {}, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + }, + room_id2: { + "required_state": [], + "timeline_limit": 0, + }, + }, + "extensions": { + "account_data": { + "enabled": True, + "rooms": [room_id1, room_id2], + } + }, + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Both rooms show up because we have a room subscription for each and they're + # requested in the `account_data` extension. + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1, room_id2}, + exact=True, + ) + + # Add some other room account data + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id1, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + self.get_success( + self.account_data_handler.add_account_data_to_room( + user_id=user1_id, + room_id=room_id2, + account_data_type="org.matrix.roorarraz2", + content={"roo": "rar"}, + ) + ) + if tag_action == TagAction.ADD: + # Add another room tag + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id1, + tag="m.server_notice", + content={}, + ) + ) + self.get_success( + self.account_data_handler.add_tag_to_room( + user_id=user1_id, + room_id=room_id2, + tag="m.server_notice", + content={}, + ) + ) + elif tag_action == TagAction.REMOVE: + # Remove the room tag + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id1, + tag="m.favourite", + ) + ) + self.get_success( + self.account_data_handler.remove_tag_from_room( + user_id=user1_id, + room_id=room_id2, + tag="m.favourite", + ) + ) + else: + assert_never(tag_action) + + # Make an incremental Sliding Sync request for just room1 + response_body, from_token = self.do_sync( { - event["type"] - for event in response_body["extensions"]["account_data"] - .get("rooms") - .get(room_id1) + **sync_body, + "room_subscriptions": { + room_id1: { + "required_state": [], + "timeline_limit": 0, + }, + }, }, - {"org.matrix.roorarraz2"}, + since=from_token, + tok=user1_tok, + ) + + # Only room1 shows up because we only have a room subscription for room1 now. 
+ self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id1}, exact=True, ) + # Make an incremental Sliding Sync request for just room2 now + response_body, from_token = self.do_sync( + { + **sync_body, + "room_subscriptions": { + room_id2: { + "required_state": [], + "timeline_limit": 0, + }, + }, + }, + since=from_token, + tok=user1_tok, + ) + + # Only room2 shows up because we only have a room subscription for room2 now. + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + + self.assertIsNotNone(response_body["extensions"]["account_data"].get("global")) + # Check for room account data for room2 + self.assertIncludes( + response_body["extensions"]["account_data"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + # We should see any room account data updates for room2 since the last + # time we saw it down sync + account_data_map = { + event["type"]: event["content"] + for event in response_body["extensions"]["account_data"] + .get("rooms") + .get(room_id2) + } + self.assertIncludes( + account_data_map.keys(), + {"org.matrix.roorarraz2", AccountDataTypes.TAG}, + exact=True, + ) + self.assertEqual(account_data_map["org.matrix.roorarraz2"], {"roo": "rar"}) + if tag_action == TagAction.ADD: + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {"m.favourite": {}, "m.server_notice": {}}}, + ) + elif tag_action == TagAction.REMOVE: + # If we previously showed the client that the room has tags, when it no + # longer has tags, we need to show them an empty map. + self.assertEqual( + account_data_map[AccountDataTypes.TAG], + {"tags": {}}, + ) + else: + assert_never(tag_action) + def test_wait_for_new_data(self) -> None: """ Test to make sure that the Sliding Sync request waits for new data to arrive. diff --git a/tests/rest/client/sliding_sync/test_extension_e2ee.py b/tests/rest/client/sliding_sync/test_extension_e2ee.py
index 320f8c788f..7ce6592d8f 100644
--- a/tests/rest/client/sliding_sync/test_extension_e2ee.py
+++ b/tests/rest/client/sliding_sync/test_extension_e2ee.py
@@ -13,6 +13,8 @@
 #
 import logging
 
+from parameterized import parameterized_class
+
 from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
@@ -27,6 +29,20 @@ from tests.server import TimedOutException
 logger = logging.getLogger(__name__)
 
 
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
 class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
     """Tests for the e2ee sliding sync extension"""
@@ -42,6 +58,8 @@ class SlidingSyncE2eeExtensionTestCase(SlidingSyncBase):
         self.store = hs.get_datastores().main
         self.e2e_keys_handler = hs.get_e2e_keys_handler()
 
+        super().prepare(reactor, clock, hs)
+
     def test_no_data_initial_sync(self) -> None:
         """
         Test that enabling the e2ee extension works during an initial sync, even if there
diff --git a/tests/rest/client/sliding_sync/test_extension_receipts.py b/tests/rest/client/sliding_sync/test_extension_receipts.py
index 65fbac260e..6e7700b533 100644
--- a/tests/rest/client/sliding_sync/test_extension_receipts.py
+++ b/tests/rest/client/sliding_sync/test_extension_receipts.py
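The new receipts tests below repeatedly dig the same path out of the sliding sync response. A hedged sketch of that navigation, using the literal `"m.read"` that `ReceiptTypes.READ` resolves to; `response_body` stands in for the parsed JSON of a completed sync request, and the helper name is illustrative:

```python
# Sketch of how the tests below pick read-receipt senders out of a sliding
# sync response. All IDs are placeholders; only the dict shape is taken from
# the assertions in the diff.
from typing import Any, Dict, Set


def read_receipt_senders(
    response_body: Dict[str, Any], room_id: str, event_id: str
) -> Set[str]:
    # extensions -> receipts -> rooms -> <room_id> holds an m.receipt-shaped
    # dict: content -> <event_id> -> "m.read" -> {user_id: {...}}.
    receipt = response_body["extensions"]["receipts"]["rooms"][room_id]
    return set(receipt["content"][event_id]["m.read"].keys())
```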
@@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -28,6 +30,20 @@ from tests.server import TimedOutException logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): """Tests for the receipts sliding sync extension""" @@ -42,6 +58,8 @@ class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + super().prepare(reactor, clock, hs) + def test_no_data_initial_sync(self) -> None: """ Test that enabling the receipts extension works during an intitial sync, @@ -677,3 +695,240 @@ class SlidingSyncReceiptsExtensionTestCase(SlidingSyncBase): set(), exact=True, ) + + def test_receipts_incremental_sync_out_of_range(self) -> None: + """Tests that we don't return read receipts for rooms that fall out of + range, but then do send all read receipts once they're back in range. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id2, user1_id, tok=user1_tok) + + # Send a message and read receipt into room2 + event_response = self.helper.send(room_id2, body="new event", tok=user2_tok) + room2_event_id = event_response["event_id"] + + self.helper.send_read_receipt(room_id2, room2_event_id, tok=user1_tok) + + # Now send a message into room1 so that it is at the top of the list + self.helper.send(room_id1, body="new event", tok=user2_tok) + + # Make a SS request for only the top room. + sync_body = { + "lists": { + "main": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 5, + } + }, + "extensions": { + "receipts": { + "enabled": True, + } + }, + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # The receipt is in room2, but only room1 is returned, so we don't + # expect to get the receipt. + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + set(), + exact=True, + ) + + # Move room2 into range. + self.helper.send(room_id2, body="new event", tok=user2_tok) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We expect to see the read receipt of room2, as that has the most + # recent update. 
+ self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + receipt = response_body["extensions"]["receipts"]["rooms"][room_id2] + self.assertIncludes( + receipt["content"][room2_event_id][ReceiptTypes.READ].keys(), + {user1_id}, + exact=True, + ) + + # Send a message into room1 to bump it to the top, but also send a + # receipt in room2 + self.helper.send(room_id1, body="new event", tok=user2_tok) + self.helper.send_read_receipt(room_id2, room2_event_id, tok=user2_tok) + + # We don't expect to see the new read receipt. + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + set(), + exact=True, + ) + + # But if we send a new message into room2, we expect to get the missing receipts + self.helper.send(room_id2, body="new event", tok=user2_tok) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id2}, + exact=True, + ) + + # We should only see the new receipt + receipt = response_body["extensions"]["receipts"]["rooms"][room_id2] + self.assertIncludes( + receipt["content"][room2_event_id][ReceiptTypes.READ].keys(), + {user2_id}, + exact=True, + ) + + def test_return_own_read_receipts(self) -> None: + """Test that we always send the user's own read receipts in initial + rooms, even if the receipts don't match events in the timeline.. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Send a message and read receipts into room1 + event_response = self.helper.send(room_id1, body="new event", tok=user2_tok) + room1_event_id = event_response["event_id"] + + self.helper.send_read_receipt(room_id1, room1_event_id, tok=user1_tok) + self.helper.send_read_receipt(room_id1, room1_event_id, tok=user2_tok) + + # Now send a message so the above message is not in the timeline. + self.helper.send(room_id1, body="new event", tok=user2_tok) + + # Make a SS request for only the latest message. + sync_body = { + "lists": { + "main": { + "ranges": [[0, 0]], + "required_state": [], + "timeline_limit": 1, + } + }, + "extensions": { + "receipts": { + "enabled": True, + } + }, + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # We should get our own receipt in room1, even though its not in the + # timeline limit. + self.assertIncludes( + response_body["extensions"]["receipts"].get("rooms").keys(), + {room_id1}, + exact=True, + ) + + # We should only see our read receipt, not the other user's. 
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id1]
+        self.assertIncludes(
+            receipt["content"][room1_event_id][ReceiptTypes.READ].keys(),
+            {user1_id},
+            exact=True,
+        )
+
+    def test_read_receipts_expanded_timeline(self) -> None:
+        """Test that we get read receipts when we expand the timeline limit (`unstable_expanded_timeline`)."""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+
+        # Send a message and read receipt into room1
+        event_response = self.helper.send(room_id1, body="new event", tok=user2_tok)
+        room1_event_id = event_response["event_id"]
+
+        self.helper.send_read_receipt(room_id1, room1_event_id, tok=user2_tok)
+
+        # Now send a message so the above message is not in the timeline.
+        self.helper.send(room_id1, body="new event", tok=user2_tok)
+
+        # Make a SS request for only the latest message.
+        sync_body = {
+            "lists": {
+                "main": {
+                    "ranges": [[0, 0]],
+                    "required_state": [],
+                    "timeline_limit": 1,
+                }
+            },
+            "extensions": {
+                "receipts": {
+                    "enabled": True,
+                }
+            },
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # We shouldn't see user2's read receipt, as it's not in the timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            set(),
+            exact=True,
+        )
+
+        # Now do another request with a room subscription with an increased timeline limit
+        sync_body["room_subscriptions"] = {
+            room_id1: {
+                "required_state": [],
+                "timeline_limit": 2,
+            }
+        }
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # Assert that we did actually get an expanded timeline
+        room_response = response_body["rooms"][room_id1]
+        self.assertNotIn("initial", room_response)
+        self.assertEqual(room_response["unstable_expanded_timeline"], True)
+
+        # We should now see user2's read receipt, as it's in the expanded timeline
+        self.assertIncludes(
+            response_body["extensions"]["receipts"].get("rooms").keys(),
+            {room_id1},
+            exact=True,
+        )
+
+        # We should only see user2's read receipt here; user1 never sent one.
+        receipt = response_body["extensions"]["receipts"]["rooms"][room_id1]
+        self.assertIncludes(
+            receipt["content"][room1_event_id][ReceiptTypes.READ].keys(),
+            {user2_id},
+            exact=True,
+        )
diff --git a/tests/rest/client/sliding_sync/test_extension_to_device.py b/tests/rest/client/sliding_sync/test_extension_to_device.py
index f8500812ea..790abb739d 100644
--- a/tests/rest/client/sliding_sync/test_extension_to_device.py
+++ b/tests/rest/client/sliding_sync/test_extension_to_device.py
@@ -14,6 +14,8 @@
 import logging
 from typing import List
 
+from parameterized import parameterized_class
+
 from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
@@ -28,6 +30,20 @@ from tests.server import TimedOutException
 logger = logging.getLogger(__name__)
 
 
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
 class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase):
     """Tests for the to-device sliding sync extension"""
@@ -40,6 +56,7 @@ class SlidingSyncToDeviceExtensionTestCase(SlidingSyncBase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
+        super().prepare(reactor, clock, hs)
 
     def _assert_to_device_response(
         self, response_body: JsonDict, expected_messages: List[JsonDict]
diff --git a/tests/rest/client/sliding_sync/test_extension_typing.py b/tests/rest/client/sliding_sync/test_extension_typing.py
index 7f523e0f10..f87c3c8b17 100644
--- a/tests/rest/client/sliding_sync/test_extension_typing.py
+++ b/tests/rest/client/sliding_sync/test_extension_typing.py
@@ -13,6 +13,8 @@
 #
 import logging
 
+from parameterized import parameterized_class
+
 from twisted.test.proto_helpers import MemoryReactor
 
 import synapse.rest.admin
@@ -28,6 +30,20 @@ from tests.server import TimedOutException
 logger = logging.getLogger(__name__)
 
 
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
 class SlidingSyncTypingExtensionTestCase(SlidingSyncBase):
     """Tests for the typing notification sliding sync extension"""
@@ -41,6 +57,8 @@ class SlidingSyncTypingExtensionTestCase(SlidingSyncBase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
 
+        super().prepare(reactor, clock, hs)
+
     def test_no_data_initial_sync(self) -> None:
         """
         Test that enabling the typing extension works during an initial sync,
diff --git a/tests/rest/client/sliding_sync/test_extensions.py b/tests/rest/client/sliding_sync/test_extensions.py
index 68f6661334..30230e5c4b 100644
--- a/tests/rest/client/sliding_sync/test_extensions.py
+++ b/tests/rest/client/sliding_sync/test_extensions.py
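Beyond the usual `parameterized_class` wrapper, the hunks below bump `timeline_limit` from 0 to 1 wherever receipts are exercised. A sketch of the relevant slice of the request body, with the reasoning carried over as comments; the list name and range values are illustrative:

```python
# Sketch of the list portion of a sliding sync request body as adjusted in
# the hunk below. Only the `timeline_limit` reasoning is taken from the diff.
sync_body = {
    "lists": {
        "foo-list": {
            "ranges": [[0, 1]],
            "required_state": [],
            # A limit of 0 would suppress every receipt for a room being sent
            # down the connection for the first time (or marked
            # `initial: true`), because receipts are only included for events
            # that appear in the timeline.
            "timeline_limit": 1,
        },
    },
}
```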
@@ -14,7 +14,7 @@
 import logging
 from typing import Literal
 
-from parameterized import parameterized
+from parameterized import parameterized, parameterized_class
 from typing_extensions import assert_never
 
 from twisted.test.proto_helpers import MemoryReactor
@@ -30,6 +30,20 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase
 logger = logging.getLogger(__name__)
 
 
+# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+# foreground update for
+# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+# https://github.com/element-hq/synapse/issues/17623)
+@parameterized_class(
+    ("use_new_tables",),
+    [
+        (True,),
+        (False,),
+    ],
+    class_name_func=lambda cls,
+    num,
+    params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}",
+)
 class SlidingSyncExtensionsTestCase(SlidingSyncBase):
     """
     Test general extensions behavior in the Sliding Sync API. Each extension has their
@@ -49,6 +63,8 @@ class SlidingSyncExtensionsTestCase(SlidingSyncBase):
         self.storage_controllers = hs.get_storage_controllers()
         self.account_data_handler = hs.get_account_data_handler()
 
+        super().prepare(reactor, clock, hs)
+
     # Any extensions that use `lists`/`rooms` should be tested here
     @parameterized.expand([("account_data",), ("receipts",), ("typing",)])
     def test_extensions_lists_rooms_relevant_rooms(
@@ -120,19 +136,26 @@ class SlidingSyncExtensionsTestCase(SlidingSyncBase):
                 "foo-list": {
                     "ranges": [[0, 1]],
                     "required_state": [],
-                    "timeline_limit": 0,
+                    # We set this to `1` because we're testing `receipts`, which
+                    # interacts with the `timeline`. With receipts, when a room
+                    # hasn't been sent down the connection before or it appears
+                    # as `initial: true`, we only include receipts for events in
+                    # the timeline, to avoid bloating and blowing up the sync
+                    # response as the number of users in the room increases.
+                    # (this behavior is part of the spec)
+                    "timeline_limit": 1,
                 },
                 # We expect this list range to include room5, room4, room3
                 "bar-list": {
                     "ranges": [[0, 2]],
                     "required_state": [],
-                    "timeline_limit": 0,
+                    "timeline_limit": 1,
                 },
             },
             "room_subscriptions": {
                 room_id1: {
                     "required_state": [],
-                    "timeline_limit": 0,
+                    "timeline_limit": 1,
                 }
             },
         }
diff --git a/tests/rest/client/sliding_sync/test_lists_filters.py b/tests/rest/client/sliding_sync/test_lists_filters.py
new file mode 100644
index 0000000000..c59f6aedc4 --- /dev/null +++ b/tests/rest/client/sliding_sync/test_lists_filters.py
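The new file below leans on a `_create_dm_room` helper defined on `SlidingSyncBase`, which is not part of this diff. As a hedged sketch of what such a helper has to arrange: a room counts as a DM for `filters.is_dm` because of the user's `m.direct` account data, not because of anything in room state. Roughly (hypothetical outline, not the real helper):

```python
def _create_dm_room_sketch(
    self, inviter_user_id: str, inviter_tok: str, invitee_user_id: str, invitee_tok: str
) -> str:
    # Hypothetical: create a private room and invite the other user.
    dm_room_id = self.helper.create_room_as(
        inviter_user_id, is_public=False, tok=inviter_tok
    )
    self.helper.invite(
        dm_room_id, src=inviter_user_id, targ=invitee_user_id, tok=inviter_tok
    )
    self.helper.join(dm_room_id, invitee_user_id, tok=invitee_tok)
    # `m.direct` maps the *other* user to the list of DM room IDs; this is
    # what the `is_dm` filter keys off.
    self.get_success(
        self.account_data_handler.add_account_data_for_user(
            inviter_user_id, "m.direct", {invitee_user_id: [dm_room_id]}
        )
    )
    return dm_room_id
```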
@@ -0,0 +1,1975 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +import logging + +from parameterized import parameterized_class + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import ( + EventContentFields, + EventTypes, + RoomTypes, +) +from synapse.api.room_versions import RoomVersions +from synapse.events import StrippedStateEvent +from synapse.rest.client import login, room, sync, tags +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase + +logger = logging.getLogger(__name__) + + +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) +class SlidingSyncFiltersTestCase(SlidingSyncBase): + """ + Test `filters` in the Sliding Sync API to make sure it includes/excludes rooms + correctly. + """ + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + sync.register_servlets, + tags.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + self.storage_controllers = hs.get_storage_controllers() + self.account_data_handler = hs.get_account_data_handler() + + super().prepare(reactor, clock, hs) + + def test_multiple_filters_and_multiple_lists(self) -> None: + """ + Test that filters apply to `lists` in various scenarios. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a DM room + joined_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=True, + ) + invited_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=False, + ) + + # Create a normal room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a room that user1 is invited to + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + # Absence of filters does not imply "False" values + "all": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {}, + }, + # Test single truthy filter + "dms": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": True}, + }, + # Test single falsy filter + "non-dms": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": False}, + }, + # Test how multiple filters should stack (AND'd together) + "room-invites": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": {"is_dm": False, "is_invite": True}, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure it has the lists we requested + self.assertIncludes( + response_body["lists"].keys(), + {"all", "dms", "non-dms", "room-invites"}, + exact=True, + ) + + # Make sure the lists have the correct rooms + self.assertIncludes( + set(response_body["lists"]["all"]["ops"][0]["room_ids"]), + { + invite_room_id, + room_id, + invited_dm_room_id, + joined_dm_room_id, + }, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["dms"]["ops"][0]["room_ids"]), + {invited_dm_room_id, joined_dm_room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["non-dms"]["ops"][0]["room_ids"]), + {invite_room_id, room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["room-invites"]["ops"][0]["room_ids"]), + {invite_room_id}, + exact=True, + ) + + def test_filters_regardless_of_membership_server_left_room(self) -> None: + """ + Test that filters apply to rooms regardless of membership. We're also + compounding the problem by having all of the local users leave the room causing + our server to leave the room. + + We want to make sure that if someone is filtering rooms, and leaves, you still + get that final update down sync that you left. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create an encrypted space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + self.helper.send_state( + space_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + # Make an initial Sliding Sync request + sync_body = { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Make sure the response has the lists we requested + self.assertIncludes( + response_body["lists"].keys(), + {"all-list", "foo-list"}, + ) + + # Make sure the lists have the correct rooms + self.assertIncludes( + set(response_body["lists"]["all-list"]["ops"][0]["room_ids"]), + {space_room_id, room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + # Everyone leaves the encrypted space room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make an incremental Sliding Sync request + sync_body = { + "lists": { + "all-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": {}, + }, + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + "filters": { + "is_encrypted": True, + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Make sure the response has the lists we requested + self.assertIncludes( + response_body["lists"].keys(), + {"all-list", "foo-list"}, + exact=True, + ) + + # Make sure the lists have the correct rooms even though we `newly_left` + self.assertIncludes( + set(response_body["lists"]["all-list"]["ops"][0]["room_ids"]), + {space_room_id, room_id}, + exact=True, + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filters_is_dm(self) -> None: + """ + Test `filter.is_dm` for DM rooms + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a DM room + dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + ) + + # Try with `is_dm=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_dm": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( 
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {dm_room_id},
+            exact=True,
+        )
+
+        # Try with `is_dm=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_dm": False,
+                    },
+                },
+            }
+        }
+        response_body, _ = self.do_sync(sync_body, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted(self) -> None:
+        """
+        Test `filters.is_encrypted` for encrypted rooms
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create an unencrypted room
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        # Try with `is_encrypted=True`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": True,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Only the encrypted room should be returned
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {encrypted_room_id},
+            exact=True,
+        )
+
+        # Try with `is_encrypted=False`
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                    "filters": {
+                        "is_encrypted": False,
+                    },
+                },
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # Only the unencrypted room should be returned
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_filters_is_encrypted_server_left_room(self) -> None:
+        """
+        Test that we can apply a `filters.is_encrypted` filter against a room that everyone has left.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Leave the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Leave the room + self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_server_left_room2(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` against a room that everyone has + left. + + There is still someone local who is invited to the rooms but that doesn't affect + whether the server is participating in the room (users need to be joined). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + _user2_tok = self.login(user2_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Invite user2 + self.helper.invite(room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Invite user2 + self.helper.invite(encrypted_room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(encrypted_room_id, user1_id, tok=user1_tok) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_after_we_left(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` against a room that was encrypted + after we left the room (make sure we don't just use the current state) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + # Leave the room + self.helper.join(room_id, user1_id, tok=user1_tok) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a room that will be encrypted + encrypted_after_we_left_room_id = self.helper.create_room_as( + user2_id, tok=user2_tok + ) + # Leave the room + self.helper.join(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) + self.helper.leave(encrypted_after_we_left_room_id, user1_id, tok=user1_tok) + + # Encrypt the room after we've left + self.helper.send_state( + encrypted_after_we_left_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + 
"required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + if self.use_new_tables: + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + else: + # Even though we left the room before it was encrypted, we still see it because + # someone else on our server is still participating in the room and we "leak" + # the current state to the left user. But we consider the room encryption status + # to not be a secret given it's often set at the start of the room and it's one + # of the stripped state events that is normally handed out. + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_after_we_left_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + if self.use_new_tables: + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, encrypted_after_we_left_room_id}, + exact=True, + ) + else: + # Even though we left the room before it was encrypted... (see comment above) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_with_remote_invite_room_no_stripped_state( + self, + ) -> None: + """ + Test that we can apply a `filters.is_encrypted` filter against a remote invite + room without any `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + _remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, None + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear because we can't figure out whether + # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear because we can't figure out whether + # it is encrypted or not (no stripped state, `unsigned.invite_room_state`). 
+ self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_with_remote_invite_encrypted_room(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` filter against a remote invite + encrypted room with some `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear here because it is encrypted + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id, remote_invite_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is encrypted + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_encrypted_with_remote_invite_unencrypted_room(self) -> None: + """ + Test that we can apply a `filters.is_encrypted` filter against a remote invite + unencrypted room with some `unsigned.invite_room_state` (stripped state). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # but don't set any room encryption event. 
+ remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + # No room encryption event + ], + ) + + # Create an unencrypted room + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create an encrypted room + encrypted_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + self.helper.send_state( + encrypted_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Try with `is_encrypted=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is unencrypted + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {encrypted_room_id}, + exact=True, + ) + + # Try with `is_encrypted=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": False, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear because it is unencrypted according to + # the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, remote_invite_room_id}, + exact=True, + ) + + def test_filters_is_encrypted_updated(self) -> None: + """ + Make sure we get rooms if the encrypted room status is updated for a joined room + (`filters.is_encrypted`) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_encrypted": True, + }, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # No rooms are encrypted yet + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Update the encryption status + self.helper.send_state( + room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # We should see the room now because it's encrypted + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_is_invite_rooms(self) -> None: + """ + Test `filters.is_invite` for rooms that the user has been invited to + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a normal room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a room that user1 is invited to + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + 
self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + # Try with `is_invite=True` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_invite": True, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {invite_room_id}, + exact=True, + ) + + # Try with `is_invite=False` + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "is_invite": False, + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_filters_room_types(self) -> None: + """ + Test `filters.room_types` for different room types + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Create an arbitrarily typed room + foo_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": { + EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" + } + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + # Try finding normal rooms and spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None, RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, space_room_id}, + exact=True, + ) + + # Try finding an arbitrary room type + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": ["org.matrix.foobarbaz"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id}, + exact=True, + ) + + # Just make sure we know what happens when you specify an empty list of room_types + # (we should find nothing) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + 
self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + def test_filters_not_room_types(self) -> None: + """ + Test `filters.not_room_types` for different room types + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Create an arbitrarily typed room + foo_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": { + EventContentFields.ROOM_TYPE: "org.matrix.foobarbaz" + } + }, + ) + + # Try finding *NOT* normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id, foo_room_id}, + exact=True, + ) + + # Try finding *NOT* spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, foo_room_id}, + exact=True, + ) + + # Try finding *NOT* normal rooms or spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [None, RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id}, + exact=True, + ) + + # Test how it behaves when we have both `room_types` and `not_room_types`. + # `not_room_types` should win. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + "not_room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # Nothing matches because nothing is both a normal room and not a normal room + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Test how it behaves when we have both `room_types` and `not_room_types`. + # `not_room_types` should win. 
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None, RoomTypes.SPACE], + "not_room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + # Just make sure we know what happens when you specify an empty list of not_room_types + # (we should find all of the rooms) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_room_types": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, foo_room_id, space_room_id}, + exact=True, + ) + + def test_filters_room_types_server_left_room(self) -> None: + """ + Test that we can apply a `filters.room_types` against a room that everyone has left. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Leave the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Leave the room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filter_room_types_server_left_room2(self) -> None: + """ + Test that we can apply a `filter.room_types` against a room that everyone has left. + + There is still someone local who is invited to the rooms but that doesn't affect + whether the server is participating in the room (users need to be joined). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + _user2_tok = self.login(user2_id, "pass") + + # Get a token before we create any rooms + sync_body: JsonDict = { + "lists": {}, + } + response_body, before_rooms_token = self.do_sync(sync_body, tok=user1_tok) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Invite user2 + self.helper.invite(room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Invite user2 + self.helper.invite(space_room_id, targ=user2_id, tok=user1_tok) + # User1 leaves the room + self.helper.leave(space_room_id, user1_id, tok=user1_tok) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + # Use an incremental sync so that the room is considered `newly_left` and shows + # up down sync + response_body, _ = self.do_sync( + sync_body, since=before_rooms_token, tok=user1_tok + ) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filters_room_types_with_remote_invite_room_no_stripped_state(self) -> None: + """ + Test that we can apply a `filters.room_types` filter against a remote invite + room without any `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + _remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, None + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + # `remote_invite_room_id` should not appear because we can't figure out what + # room type it is (no stripped state, `unsigned.invite_room_state`) + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + # `remote_invite_room_id` should not appear because we can't figure out what + # room type it is (no stripped state, `unsigned.invite_room_state`) + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def test_filters_room_types_with_remote_invite_space(self) -> None: + """ + Test that we can apply a `filters.room_types` filter against a remote invite + to a space room with some `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` indicating + # that it is a space room + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # Specify that it is a space room + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + ], + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is a space room + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear here because it is a space room + # according to the stripped state + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id, remote_invite_room_id}, + exact=True, + ) + + def test_filters_room_types_with_remote_invite_normal_room(self) -> None: + """ + Test that we can apply a `filters.room_types` filter against a remote invite + to a normal room with some `unsigned.invite_room_state` (stripped state). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # but the create event does not specify a room type (normal room) + remote_invite_room_id = self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # No room type means this is a normal room + }, + ), + ], + ) + + # Create a normal room (no room type) + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a space room + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + + # Try finding only normal rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [None], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should appear here because it is a normal room + # according to the stripped state (no room type) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id, remote_invite_room_id}, + exact=True, + ) + + # Try finding only spaces + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # `remote_invite_room_id` should not appear here because it is a normal room + # according to the stripped state (no room type) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {space_room_id}, + exact=True, + ) + + def _add_tag_to_room( + self, *, room_id: str, user_id: str, access_token: str, tag_name: str + ) -> None: + channel = self.make_request( + method="PUT", + path=f"/user/{user_id}/rooms/{room_id}/tags/{tag_name}", + content={}, + access_token=access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + def test_filters_tags(self) -> None: + """ + Test `filters.tags` for rooms with given tags + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with no tags + self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create some rooms with tags + foo_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + bar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Create a room without multiple tags + foobar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Add the "foo" tag to the foo room + self._add_tag_to_room( + room_id=foo_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + # Add the "bar" tag to the bar room + self._add_tag_to_room( + room_id=bar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + # Add both "foo" and "bar" tags to the foobar room + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + + # Try finding rooms with the "foo" tag + sync_body = { + "lists": { + 
"foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["foo"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id, foobar_room_id}, + exact=True, + ) + + # Try finding rooms with either "foo" or "bar" tags + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["foo", "bar"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {foo_room_id, bar_room_id, foobar_room_id}, + exact=True, + ) + + # Try with a random tag we didn't add + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["flomp"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # No rooms should match + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Just make sure we know what happens when you specify an empty list of tags + # (we should find nothing) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + def test_filters_not_tags(self) -> None: + """ + Test `filters.not_tags` for excluding rooms with given tags + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with no tags + untagged_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create some rooms with tags + foo_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + bar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Create a room without multiple tags + foobar_room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Add the "foo" tag to the foo room + self._add_tag_to_room( + room_id=foo_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + # Add the "bar" tag to the bar room + self._add_tag_to_room( + room_id=bar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + # Add both "foo" and "bar" tags to the foobar room + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="foo", + ) + self._add_tag_to_room( + room_id=foobar_room_id, + user_id=user1_id, + access_token=user1_tok, + tag_name="bar", + ) + + # Try finding rooms without the "foo" tag + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_tags": ["foo"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {untagged_room_id, bar_room_id}, + exact=True, + ) + + # Try finding rooms without either "foo" or "bar" tags + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_tags": ["foo", "bar"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + 
self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {untagged_room_id}, + exact=True, + ) + + # Test how it behaves when we have both `tags` and `not_tags`. + # `not_tags` should win. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "tags": ["foo"], + "not_tags": ["foo"], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # Nothing matches because nothing is both tagged with "foo" and not tagged with "foo" + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, + ) + + # Just make sure we know what happens when you specify an empty list of not_tags + # (we should find all of the rooms) + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + "filters": { + "not_tags": [], + }, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {untagged_room_id, foo_room_id, bar_room_id, foobar_room_id}, + exact=True, + ) diff --git a/tests/rest/client/sliding_sync/test_room_subscriptions.py b/tests/rest/client/sliding_sync/test_room_subscriptions.py
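Taken together, the tag tests above pin down how `tags` and `not_tags` combine. A standalone restatement of those rules, as an editor's model of the behaviour the assertions exercise (not code from the diff):

```python
from typing import AbstractSet, Optional, Sequence


def matches_tag_filters(
    room_tags: AbstractSet[str],
    tags: Optional[Sequence[str]],
    not_tags: Optional[Sequence[str]],
) -> bool:
    # `tags` requires at least one overlapping tag, so an empty list matches
    # no rooms at all.
    if tags is not None and not set(tags) & set(room_tags):
        return False
    # `not_tags` excludes rooms with any overlapping tag and wins over `tags`;
    # an empty list excludes nothing.
    if not_tags is not None and set(not_tags) & set(room_tags):
        return False
    return True


# Mirrors the assertions in the tests above:
assert matches_tag_filters({"foo"}, tags=["foo"], not_tags=None)
assert not matches_tag_filters({"foo"}, tags=["foo"], not_tags=["foo"])  # not_tags wins
assert not matches_tag_filters(set(), tags=[], not_tags=None)  # empty `tags` finds nothing
assert matches_tag_filters(set(), tags=None, not_tags=[])  # empty `not_tags` excludes nothing
```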
index cc17b0b354..285fdaaf78 100644 --- a/tests/rest/client/sliding_sync/test_room_subscriptions.py +++ b/tests/rest/client/sliding_sync/test_room_subscriptions.py
@@ -14,6 +14,8 @@ import logging from http import HTTPStatus +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -27,6 +29,20 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase): """ Test `room_subscriptions` in the Sliding Sync API. @@ -43,6 +59,8 @@ class SlidingSyncRoomSubscriptionsTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_room_subscriptions_with_join_membership(self) -> None: """ Test `room_subscriptions` with a joined room should give us timeline and current diff --git a/tests/rest/client/sliding_sync/test_rooms_invites.py b/tests/rest/client/sliding_sync/test_rooms_invites.py
index f08ffaf674..882762ca29 100644 --- a/tests/rest/client/sliding_sync/test_rooms_invites.py +++ b/tests/rest/client/sliding_sync/test_rooms_invites.py
@@ -13,6 +13,8 @@ # import logging +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -27,6 +29,20 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase): """ Test to make sure the `rooms` response looks good for invites in the Sliding Sync API. @@ -49,6 +65,8 @@ class SlidingSyncRoomsInvitesTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_rooms_invite_shared_history_initial_sync(self) -> None: """ Test that `rooms` we are invited to have some stripped `invite_state` during an diff --git a/tests/rest/client/sliding_sync/test_rooms_meta.py b/tests/rest/client/sliding_sync/test_rooms_meta.py
index 04f11c0524..0a8b2c02c2 100644 --- a/tests/rest/client/sliding_sync/test_rooms_meta.py +++ b/tests/rest/client/sliding_sync/test_rooms_meta.py
@@ -13,10 +13,12 @@ # import logging +from parameterized import parameterized, parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.rest.client import login, room, sync from synapse.server import HomeServer @@ -28,6 +30,20 @@ from tests.test_utils.event_injection import create_event logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): """ Test rooms meta info like name, avatar, joined_count, invited_count, is_dm, @@ -44,11 +60,18 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + self.state_handler = self.hs.get_state_handler() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.persistence = persistence + + super().prepare(reactor, clock, hs) - def test_rooms_meta_when_joined(self) -> None: + def test_rooms_meta_when_joined_initial(self) -> None: """ - Test that the `rooms` `name` and `avatar` are included in the response and - reflect the current state of the room when the user is joined to the room. + Test that the `rooms` `name` and `avatar` are included in the initial sync + response and reflect the current state of the room when the user is joined to + the room. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -85,6 +108,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Reflect the current state of the room + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super room", @@ -107,6 +131,178 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body["rooms"][room_id1].get("is_dm"), ) + def test_rooms_meta_when_joined_incremental_no_change(self) -> None: + """ + Test that the `rooms` `name` and `avatar` aren't included in an incremental sync + response if they haven't changed. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # This needs to be set to one so the `RoomResult` isn't empty and + # the room comes down incremental sync when we send a new message. + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send a message to make the room come down sync + self.helper.send(room_id1, "message in room1", tok=user2_tok) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We should only see changed meta info (nothing changed so we shouldn't see any + # of these fields) + self.assertNotIn( + "initial", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "name", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "avatar", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + ) + + @parameterized.expand( + [ + ("in_required_state", True), + ("not_in_required_state", False), + ] + ) + def test_rooms_meta_when_joined_incremental_with_state_change( + self, test_description: str, include_changed_state_in_required_state: bool + ) -> None: + """ + Test that the `rooms` `name` and `avatar` are included in an incremental sync + response if they changed. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + # Set the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID"}, + tok=user2_tok, + ) + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": ( + [[EventTypes.Name, ""], [EventTypes.RoomAvatar, ""]] + # Conditionally include the changed state in the + # `required_state` to make sure whether we request it or not, + # the new room name still flows down to the client. 
+ if include_changed_state_in_required_state + else [] + ), + "timeline_limit": 0, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Update the room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper room"}, + tok=user2_tok, + ) + # Update the room avatar URL + self.helper.send_state( + room_id1, + EventTypes.RoomAvatar, + {"url": "mxc://DUMMY_MEDIA_ID_UPDATED"}, + tok=user2_tok, + ) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # We should only see changed meta info (the room name and avatar) + self.assertNotIn( + "initial", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["name"], + "my super duper room", + response_body["rooms"][room_id1], + ) + self.assertEqual( + response_body["rooms"][room_id1]["avatar"], + "mxc://DUMMY_MEDIA_ID_UPDATED", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + ) + def test_rooms_meta_when_invited(self) -> None: """ Test that the `rooms` `name` and `avatar` are included in the response and @@ -164,6 +360,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): # This should still reflect the current state of the room even when the user is # invited. + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super duper room", @@ -174,14 +371,17 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): "mxc://UPDATED_DUMMY_MEDIA_ID", response_body["rooms"][room_id1], ) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - 1, + + # We don't give extra room information to invitees + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 1, + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], ) + self.assertIsNone( response_body["rooms"][room_id1].get("is_dm"), ) @@ -242,6 +442,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Reflect the state of the room at the time of leaving + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super room", @@ -252,15 +453,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): "mxc://DUMMY_MEDIA_ID", response_body["rooms"][room_id1], ) - self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - # FIXME: The actual number should be "1" (user2) but we currently don't - # support this for rooms where the user has left/been banned. - 0, + + # FIXME: We possibly want to return joined and invited counts for rooms + # you're banned from + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], ) - self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - 0, + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], ) self.assertIsNone( response_body["rooms"][room_id1].get("is_dm"), ) @@ -316,6 +518,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): # Room1 has a name so we shouldn't see any `heroes` which the client would use # to calculate the room name themselves.
+ self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertEqual( response_body["rooms"][room_id1]["name"], "my super room", @@ -332,6 +535,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): ) # Room2 doesn't have a name so we should see `heroes` populated + self.assertEqual(response_body["rooms"][room_id2]["initial"], True) self.assertIsNone(response_body["rooms"][room_id2].get("name")) self.assertCountEqual( [ @@ -403,6 +607,7 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Room2 doesn't have a name so we should see `heroes` populated + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertIsNone(response_body["rooms"][room_id1].get("name")) self.assertCountEqual( [ @@ -475,7 +680,8 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): } response_body, _ = self.do_sync(sync_body, tok=user1_tok) - # Room2 doesn't have a name so we should see `heroes` populated + # Room doesn't have a name so we should see `heroes` populated + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) self.assertIsNone(response_body["rooms"][room_id1].get("name")) self.assertCountEqual( [ @@ -490,20 +696,175 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): [], ) + # FIXME: We possibly want to return joined and invited counts for rooms + # you're banned from + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id1], + ) + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id1], + ) + + def test_rooms_meta_heroes_incremental_sync_no_change(self) -> None: + """ + Test that the `rooms` `heroes` aren't included in an incremental sync + response if they haven't changed. + + (when the room doesn't have a room name set) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + _user3_tok = self.login(user3_id, "pass") + + room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room2", + }, + ) + self.helper.join(room_id, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id, src=user2_id, targ=user3_id, tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + # This needs to be set to one so the `RoomResult` isn't empty and + # the room comes down incremental sync when we send a new message. + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send a message to make the room come down sync + self.helper.send(room_id, "message in room", tok=user2_tok) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # This is an incremental sync and the second time we have seen this room so it + # isn't `initial` + self.assertNotIn( + "initial", + response_body["rooms"][room_id], + ) + # Room shouldn't have a room name because we're testing the `heroes` field which + # will only have a chance to appear if the room doesn't have a name.
+ self.assertNotIn( + "name", + response_body["rooms"][room_id], + ) + # No change to heroes + self.assertNotIn( + "heroes", + response_body["rooms"][room_id], + ) + # No change to member counts + self.assertNotIn( + "joined_count", + response_body["rooms"][room_id], + ) + self.assertNotIn( + "invited_count", + response_body["rooms"][room_id], + ) + # We didn't request any state so we shouldn't see any `required_state` + self.assertNotIn( + "required_state", + response_body["rooms"][room_id], + ) + + def test_rooms_meta_heroes_incremental_sync_with_membership_change(self) -> None: + """ + Test that the `rooms` `heroes` are included in an incremental sync response if + the membership has changed. + + (when the room doesn't have a room name set) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + # No room name set so that `heroes` is populated + # + # "name": "my super room2", + }, + ) + self.helper.join(room_id, user1_id, tok=user1_tok) + # User3 is invited + self.helper.invite(room_id, src=user2_id, targ=user3_id, tok=user2_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # User3 joins (membership change) + self.helper.join(room_id, user3_id, tok=user3_tok) + + # Incremental sync + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # This is an incremental sync and the second time we have seen this room so it + # isn't `initial` + self.assertNotIn( + "initial", + response_body["rooms"][room_id], + ) + # Room shouldn't have a room name because we're testing the `heroes` field which + # will only have a chance to appear if the room doesn't have a name. + self.assertNotIn( + "name", + response_body["rooms"][room_id], + ) + # Membership change so we should see heroes and membership counts + self.assertCountEqual( + [ + hero["user_id"] + for hero in response_body["rooms"][room_id].get("heroes", []) + ], + # Heroes shouldn't include the user themselves (we shouldn't see user1) + [user2_id, user3_id], + ) self.assertEqual( - response_body["rooms"][room_id1]["joined_count"], - # FIXME: The actual number should be "1" (user2) but we currently don't - # support this for rooms where the user has left/been banned. - 0, + response_body["rooms"][room_id]["joined_count"], + 3, ) self.assertEqual( - response_body["rooms"][room_id1]["invited_count"], - # We shouldn't see user5 since they were invited after user1 was banned. - # - # FIXME: The actual number should be "1" (user3) but we currently don't - # support this for rooms where the user has left/been banned.
+ response_body["rooms"][room_id]["invited_count"], 0, ) + # We didn't request any state so we shouldn't see any `required_state` + self.assertNotIn( + "required_state", + response_body["rooms"][room_id], + ) def test_rooms_bump_stamp(self) -> None: """ @@ -566,19 +927,17 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): ) # Make sure the list includes the rooms in the right order - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - # room1 sorts before room2 because it has the latest event (the - # reaction) - "room_ids": [room_id1, room_id2], - } - ], + self.assertEqual( + len(response_body["lists"]["foo-list"]["ops"]), + 1, response_body["lists"]["foo-list"], ) + op = response_body["lists"]["foo-list"]["ops"][0] + self.assertEqual(op["op"], "SYNC") + self.assertEqual(op["range"], [0, 1]) + # Note that we don't sort the rooms when the range includes all of the rooms, so + # we just assert that the rooms are included + self.assertIncludes(set(op["room_ids"]), {room_id1, room_id2}, exact=True) # The `bump_stamp` for room1 should point at the latest message (not the # reaction since it's not one of the `DEFAULT_BUMP_EVENT_TYPES`) @@ -600,16 +959,16 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): Test that `bump_stamp` ignores backfilled events, i.e. events with a negative stream ordering. """ - user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") # Create a remote room creator = "@user:other" room_id = "!foo:other" + room_version = RoomVersions.V10 shared_kwargs = { "room_id": room_id, - "room_version": "10", + "room_version": room_version.identifier, } create_tuple = self.get_success( @@ -618,6 +977,12 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): prev_event_ids=[], type=EventTypes.Create, state_key="", + content={ + # The `ROOM_CREATOR` field could be removed if we used a room + # version > 10 (in favor of relying on `sender`) + EventContentFields.ROOM_CREATOR: creator, + EventContentFields.ROOM_VERSION: room_version.identifier, + }, sender=creator, **shared_kwargs, ) @@ -667,22 +1032,29 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase): ] # Ensure the local HS knows the room version - self.get_success( - self.store.store_room(room_id, creator, False, RoomVersions.V10) - ) + self.get_success(self.store.store_room(room_id, creator, False, room_version)) # Persist these events as backfilled events. - persistence = self.hs.get_storage_controllers().persistence - assert persistence is not None - for event, context in remote_events_and_contexts: - self.get_success(persistence.persist_event(event, context, backfilled=True)) + self.get_success( + self.persistence.persist_event(event, context, backfilled=True) + ) - # Now we join the local user to the room - join_tuple = self.get_success( + # Now we join the local user to the room. We want to make this feel as close to + # the real `process_remote_join()` as possible but we'd like to avoid some of + # the auth checks that would be done in the real code. + # + # FIXME: The test was originally written using this less-real + # `persist_event(...)` shortcut but it would be nice to use the real remote join + # process in a `FederatingHomeserverTestCase`. + flawed_join_tuple = self.get_success( create_event( self.hs, prev_event_ids=[invite_tuple[0].event_id], + # This doesn't work correctly to create an `EventContext` that includes + # both of these state events. 
I assume it's because we're working on our + # local homeserver which has the remote state set as `outlier`. We have + # to create our own EventContext below to get this right. auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], type=EventTypes.Member, state_key=user1_id, @@ -691,7 +1063,22 @@ **shared_kwargs, ) ) - self.get_success(persistence.persist_event(*join_tuple)) + # We have to create our own context to get the state set correctly. If we use + # the `EventContext` from the `flawed_join_tuple`, the `current_state_events` + # table will only have the join event in it which should never happen in our + # real server. + join_event = flawed_join_tuple[0] + join_context = self.get_success( + self.state_handler.compute_event_context( + join_event, + state_ids_before_event={ + (e.type, e.state_key): e.event_id + for e in [create_tuple[0], invite_tuple[0]] + }, + partial_state=False, + ) + ) + self.get_success(self.persistence.persist_event(join_event, join_context)) # Doing an SS request should return a positive `bump_stamp`, even though # the only event that matches the bump types has a negative stream @@ -708,3 +1095,244 @@ response_body, _ = self.do_sync(sync_body, tok=user1_tok) self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0) + + def test_rooms_bump_stamp_no_change_incremental(self) -> None: + """Test that the bump stamp is omitted if there has been no change""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 100, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Initial sync so we expect to see a bump stamp + self.assertIn("bump_stamp", response_body["rooms"][room_id1]) + + # Send an event that is not in the bump events list + self.helper.send_event( + room_id1, type="org.matrix.test", content={}, tok=user1_tok + ) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # There hasn't been a change to the bump stamps, so we ignore it + self.assertNotIn("bump_stamp", response_body["rooms"][room_id1]) + + def test_rooms_bump_stamp_change_incremental(self) -> None: + """Test that the bump stamp is included if there has been a change, even + if it's not in the timeline""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 2, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Initial sync so we expect to see a bump stamp + self.assertIn("bump_stamp", response_body["rooms"][room_id1]) + first_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"] + + # Send a bump event at the start.
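+ # (An `m.room.message` is one of the `DEFAULT_BUMP_EVENT_TYPES`, so it should advance the bump stamp even though the non-bump filler events below push it out of the 2-event timeline window.)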
+ self.helper.send(room_id1, "test", tok=user1_tok) + + # Send events that are not in the bump events list to fill the timeline + for _ in range(5): + self.helper.send_event( + room_id1, type="org.matrix.test", content={}, tok=user1_tok + ) + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # There was a bump event in the timeline gap, so we should see the bump + # stamp be updated. + self.assertIn("bump_stamp", response_body["rooms"][room_id1]) + second_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"] + + self.assertGreater(second_bump_stamp, first_bump_stamp) + + def test_rooms_bump_stamp_invites(self) -> None: + """ + Test that `bump_stamp` is present and points to the membership event, + and not later events, for non-joined rooms + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + ) + + # Invite user1 to the room + invite_response = self.helper.invite(room_id, user2_id, user1_id, tok=user2_tok) + + # More messages happen after the invite + self.helper.send(room_id, "message in room1", tok=user2_tok) + + # We expect the bump_stamp to match the invite. + invite_pos = self.get_success( + self.store.get_position_for_event(invite_response["event_id"]) + ) + + # Doing an SS request should return a `bump_stamp` of the invite event, + # rather than the message that was sent after. + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + self.assertEqual( + response_body["rooms"][room_id]["bump_stamp"], invite_pos.stream + ) + + def test_rooms_meta_is_dm(self) -> None: + """ + Test `rooms` `is_dm` is correctly set for DM rooms. 
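+ (Rooms created via the `_create_dm_room` helper should be flagged whether or not the invite was accepted; the plain room and plain invite should not.)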
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a DM room + joined_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=True, + ) + invited_dm_room_id = self._create_dm_room( + inviter_user_id=user1_id, + inviter_tok=user1_tok, + invitee_user_id=user2_id, + invitee_tok=user2_tok, + should_join_room=False, + ) + + # Create a normal room + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Create a room that user1 is invited to + invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Ensure DMs are correctly marked + self.assertDictEqual( + { + room_id: room.get("is_dm") + for room_id, room in response_body["rooms"].items() + }, + { + invite_room_id: None, + room_id: None, + invited_dm_room_id: True, + joined_dm_room_id: True, + }, + ) + + def test_old_room_with_unknown_room_version(self) -> None: + """Test that an old room with an unknown room version does not break + sync.""" + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # We first create a standard room, then we'll change the room version in + # the DB. + room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + ) + + # Poke the database and update the room version to an unknown one. + self.get_success( + self.hs.get_datastores().main.db_pool.simple_update( + "rooms", + keyvalues={"room_id": room_id}, + updatevalues={"room_version": "unknown-room-version"}, + desc="updated-room-version", + ) + ) + + # Invalidate method so that it returns the currently updated version + # instead of the cached version. + self.hs.get_datastores().main.get_room_version_id.invalidate((room_id,)) + + # For old unknown room versions we won't have an entry in this table + # (due to us skipping unknown room versions in the background update). + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + desc="delete_sliding_room", + ) + ) + + # Also invalidate some caches to ensure we pull things from the DB. + self.store._events_stream_cache._entity_to_key.pop(room_id) + self.store._get_max_event_pos.invalidate((room_id,)) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 5, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py
index a13cad223f..ba46c5a93c 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py
@@ -11,16 +11,17 @@ # See the GNU Affero General Public License for more details: # <https://www.gnu.org/licenses/agpl-3.0.html>. # +import enum import logging -from parameterized import parameterized +from parameterized import parameterized, parameterized_class from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership from synapse.handlers.sliding_sync import StateValues -from synapse.rest.client import login, room, sync +from synapse.rest.client import knock, login, room, sync from synapse.server import HomeServer from synapse.util import Clock @@ -30,6 +31,31 @@ from tests.test_utils.event_injection import mark_event_as_partial_state logger = logging.getLogger(__name__) +# Inherit from `str` so that they show up in the test description when we +# `@parameterized.expand(...)` the first parameter +class MembershipAction(str, enum.Enum): + INVITE = "invite" + JOIN = "join" + KNOCK = "knock" + LEAVE = "leave" + BAN = "ban" + KICK = "kick" + + +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): """ Test `rooms.required_state` in the Sliding Sync API. @@ -38,6 +64,7 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, + knock.register_servlets, room.register_servlets, sync.register_servlets, ] @@ -46,6 +73,8 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def test_rooms_no_required_state(self) -> None: """ Empty `rooms.required_state` should not return any state events in the room @@ -191,8 +220,14 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): } _, from_token = self.do_sync(sync_body, tok=user1_tok) - # Reset the in-memory cache - self.hs.get_sliding_sync_handler().connection_store._connections.clear() + # Reset the positions + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_connections", + keyvalues={"user_id": user1_id}, + desc="clear_sliding_sync_connections_cache", + ) + ) # Make the Sliding Sync request channel = self.make_request( @@ -359,10 +394,10 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): ) self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - def test_rooms_required_state_lazy_loading_room_members(self) -> None: + def test_rooms_required_state_lazy_loading_room_members_initial_sync(self) -> None: """ - Test `rooms.required_state` returns people relevant to the timeline when - lazy-loading room members, `["m.room.member","$LAZY"]`. + On initial sync, test `rooms.required_state` returns people relevant to the + timeline when lazy-loading room members, `["m.room.member","$LAZY"]`. 
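+ (i.e. only the memberships of the users who sent the events in the returned `timeline` should come down as state.)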
""" user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -410,6 +445,402 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): ) self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + def test_rooms_required_state_lazy_loading_room_members_incremental_sync( + self, + ) -> None: + """ + On incremental sync, test `rooms.required_state` returns people relevant to the + timeline when lazy-loading room members, `["m.room.member","$LAZY"]`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request with lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + self.helper.send(room_id1, "6", tok=user4_tok) + + # Make an incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # but since we've seen user2 in the last sync (and their membership hasn't + # changed), we should only see user4 here. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user4_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + @parameterized.expand( + [ + (MembershipAction.LEAVE,), + (MembershipAction.INVITE,), + (MembershipAction.KNOCK,), + (MembershipAction.JOIN,), + (MembershipAction.BAN,), + (MembershipAction.KICK,), + ] + ) + def test_rooms_required_state_changed_membership_in_timeline_lazy_loading_room_members_incremental_sync( + self, + room_membership_action: str, + ) -> None: + """ + On incremental sync, test `rooms.required_state` returns people relevant to the + timeline when lazy-loading room members, `["m.room.member","$LAZY"]` **including + changes to membership**. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) + # If we're testing knocks, set the room to knock + if room_membership_action == MembershipAction.KNOCK: + self.helper.send_state( + room_id1, + EventTypes.JoinRules, + {"join_rule": JoinRules.KNOCK}, + tok=user2_tok, + ) + + # Join the test users to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.invite(room_id1, src=user2_id, targ=user4_id, tok=user2_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + if room_membership_action in ( + MembershipAction.LEAVE, + MembershipAction.BAN, + MembershipAction.JOIN, + ): + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + self.helper.join(room_id1, user5_id, tok=user5_tok) + + # Send some messages to fill up the space + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request with lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + # The third event will be our membership event concerning user5 + if room_membership_action == MembershipAction.LEAVE: + # User 5 leaves + self.helper.leave(room_id1, user5_id, tok=user5_tok) + elif room_membership_action == MembershipAction.INVITE: + # User 5 is invited + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + elif room_membership_action == MembershipAction.KNOCK: + # User 5 knocks + self.helper.knock(room_id1, user5_id, tok=user5_tok) + # The admin of the room accepts the knock + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + elif room_membership_action == MembershipAction.JOIN: + # Update the display name of user5 (causing a membership change) + self.helper.send_state( + room_id1, + event_type=EventTypes.Member, + state_key=user5_id, + body={ + EventContentFields.MEMBERSHIP: Membership.JOIN, + EventContentFields.MEMBERSHIP_DISPLAYNAME: "quick changer", + }, + tok=user5_tok, + ) + elif room_membership_action == MembershipAction.BAN: + self.helper.ban(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + elif room_membership_action == MembershipAction.KICK: + # Kick user5 from the room + self.helper.change_membership( + room=room_id1, + src=user2_id, + targ=user5_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + else: + raise AssertionError( + f"Unknown room_membership_action: 
{room_membership_action}" + ) + + # Make an incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2, user4, and user5 sent events in the last 3 events we see in the + # `timeline`. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + # This appears because *some* membership in the room changed and the + # heroes are recalculated and is thrown in because we have it. But this + # is technically optional and not needed because we've already seen user2 + # in the last sync (and their membership hasn't changed). + state_map[(EventTypes.Member, user2_id)], + # Appears because there is a message in the timeline from this user + state_map[(EventTypes.Member, user4_id)], + # Appears because there is a membership event in the timeline from this user + state_map[(EventTypes.Member, user5_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_expand_lazy_loading_room_members_incremental_sync( + self, + ) -> None: + """ + Test that when we expand the `required_state` to include lazy-loading room + members, it returns people relevant to the timeline. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request *without* lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + self.helper.send(room_id1, "6", tok=user4_tok) + + # Expand `required_state` and make an incremental Sliding Sync request *with* + # lazy-loading room members + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # and we haven't seen any membership before this sync so we should see both + # users. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user2_id)], + state_map[(EventTypes.Member, user4_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + # Send a message so the room comes down sync. 
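+ # (A room only comes down an incremental response when something new happened in it, hence the filler messages.)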
+ self.helper.send(room_id1, "7", tok=user2_tok) + self.helper.send(room_id1, "8", tok=user4_tok) + self.helper.send(room_id1, "9", tok=user4_tok) + + # Make another incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # but since we've seen both memberships in the last sync, they shouldn't appear + # again. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1].get("required_state", []), + set(), + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_expand_retract_expand_lazy_loading_room_members_incremental_sync( + self, + ) -> None: + """ + Test that when we expand the `required_state` to include lazy-loading room + members, it returns people relevant to the timeline. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request *without* lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + self.helper.send(room_id1, "6", tok=user4_tok) + + # Expand `required_state` and make an incremental Sliding Sync request *with* + # lazy-loading room members + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # and we haven't seen any membership before this sync so we should see both + # users because we're lazy-loading the room members. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user2_id)], + state_map[(EventTypes.Member, user4_id)], + }, + exact=True, + ) + + # Send a message so the room comes down sync. 
+ self.helper.send(room_id1, "msg", tok=user4_tok) + + # Retract `required_state` and make an incremental Sliding Sync request + # requesting a few memberships + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.ME], + [EventTypes.Member, user2_id], + ] + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # We've seen user2's membership in the last sync so we shouldn't see it here + # even though it's requested. We should only see user1's membership. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user1_id)], + }, + exact=True, + ) + def test_rooms_required_state_me(self) -> None: """ Test `rooms.required_state` correctly handles $ME. @@ -480,9 +911,10 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) - def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: + def test_rooms_required_state_leave_ban_initial(self, stop_membership: str) -> None: """ - Test `rooms.required_state` should not return state past a leave/ban event. + Test `rooms.required_state` should not return state past a leave/ban event when + it's the first "initial" time the room is being sent down the connection. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -517,6 +949,13 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): body={"foo": "bar"}, tok=user2_tok, ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "bar"}, + tok=user2_tok, + ) if stop_membership == Membership.LEAVE: # User 1 leaves @@ -525,6 +964,8 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): # User 1 is banned self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + # Get the state_map before we change the state as this is the final state we + # expect User1 to be able to see state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) @@ -537,12 +978,36 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): body={"foo": "qux"}, tok=user2_tok, ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "qux"}, + tok=user2_tok, + ) self.helper.leave(room_id1, user3_id, tok=user3_tok) - # Make the Sliding Sync request with lazy loading for the room members + # Make an incremental Sliding Sync request + # + # Also expand the required state to include the `org.matrix.bar_state` event. + # This is just an extra complication of the test. 
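+ # (Since `org.matrix.bar_state` wasn't in the previous request, expanding `required_state` should cause it to come down even though the event itself predates the `from_token`.)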
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ["org.matrix.bar_state", ""], + ], + "timeline_limit": 3, + } + } + } response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - # Only user2 and user3 sent events in the 3 events we see in the `timeline` + # We should only see the state up to the leave/ban event self._assertRequiredStateIncludes( response_body["rooms"][room_id1]["required_state"], { @@ -551,6 +1016,126 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): state_map[(EventTypes.Member, user2_id)], state_map[(EventTypes.Member, user3_id)], state_map[("org.matrix.foo_state", "")], + state_map[("org.matrix.bar_state", "")], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) + def test_rooms_required_state_leave_ban_incremental( + self, stop_membership: str + ) -> None: + """ + Test `rooms.required_state` should not return state past a leave/ban event on + incremental sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "bar"}, + tok=user2_tok, + ) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 3, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + if stop_membership == Membership.LEAVE: + # User 1 leaves + self.helper.leave(room_id1, user1_id, tok=user1_tok) + elif stop_membership == Membership.BAN: + # User 1 is banned + self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # Get the state_map before we change the state as this is the final state we + # expect User1 to be able to see + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Change the state after user 1 leaves + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "qux"}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "qux"}, + tok=user2_tok, + ) + self.helper.leave(room_id1, user3_id, tok=user3_tok) + + # Make an incremental Sliding Sync request + # + # Also expand the required state to include the `org.matrix.bar_state` event. + # This is just an extra complication of the test. 
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ["org.matrix.bar_state", ""], + ], + "timeline_limit": 3, + } + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # User1 should only see the state up to the leave/ban event + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + # User1 should see their leave/ban membership + state_map[(EventTypes.Member, user1_id)], + state_map[("org.matrix.bar_state", "")], + # The commented out state events were already returned in the initial + # sync so we shouldn't see them again on the incremental sync. And we + # shouldn't see the state events that changed after the leave/ban event. + # + # state_map[(EventTypes.Create, "")], + # state_map[(EventTypes.Member, user2_id)], + # state_map[(EventTypes.Member, user3_id)], + # state_map[("org.matrix.foo_state", "")], }, exact=True, ) @@ -631,8 +1216,7 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): def test_rooms_required_state_partial_state(self) -> None: """ - Test partially-stated room are excluded unless `rooms.required_state` is - lazy-loading room members. + Test that partially-stated rooms are excluded if they require full state. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -649,13 +1233,63 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): mark_event_as_partial_state(self.hs, join_response2["event_id"], room_id2) ) - # Make the Sliding Sync request (NOT lazy-loading room members) + # Make the Sliding Sync request with examples where `must_await_full_state()` is + # `False` sync_body = { "lists": { - "foo-list": { + "no-state-list": { + "ranges": [[0, 1]], + "required_state": [], + "timeline_limit": 0, + }, + "other-state-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 0, + }, + "lazy-load-list": { "ranges": [[0, 1]], "required_state": [ [EventTypes.Create, ""], + # Lazy-load room members + [EventTypes.Member, StateValues.LAZY], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "local-members-only-list": { + "ranges": [[0, 1]], + "required_state": [ + # Own user ID + [EventTypes.Member, user1_id], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "me-list": { + "ranges": [[0, 1]], + "required_state": [ + # Own user ID + [EventTypes.Member, StateValues.ME], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "wildcard-type-local-state-key-list": { + "ranges": [[0, 1]], + "required_state": [ + ["*", user1_id], + # Not a user ID + ["*", "foobarbaz"], + # Not a user ID + ["*", "foo.bar.baz"], + # Not a user ID + ["*", "@foo"], ], + "timeline_limit": 0, + }, @@ -663,29 +1297,89 @@ } } response_body, _ = self.do_sync(sync_body, tok=user1_tok) - # Make sure the list includes room1 but room2 is excluded because it's still - # partially-stated - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id1], + # The list should include both rooms now because we don't need full state + for list_key in response_body["lists"].keys(): + self.assertIncludes( + set(response_body["lists"][list_key]["ops"][0]["room_ids"]), + {room_id2, room_id1}, +
exact=True, + message=f"Expected all rooms to show up for list_key={list_key}. Response " + + str(response_body["lists"][list_key]), + ) + + # Take each of the list variants and apply them to room subscriptions to make + # sure the same rules apply + for list_key in sync_body["lists"].keys(): + sync_body_for_subscriptions = { + "room_subscriptions": { + room_id1: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + room_id2: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, } - ], - response_body["lists"]["foo-list"], - ) + } + response_body, _ = self.do_sync(sync_body_for_subscriptions, tok=user1_tok) + + self.assertIncludes( + set(response_body["rooms"].keys()), + {room_id2, room_id1}, + exact=True, + message=f"Expected all rooms to show up for test_key={list_key}.", + ) - # Make the Sliding Sync request (with lazy-loading room members) + # ===================================================================== + + # Make the Sliding Sync request with examples where `must_await_full_state()` is + # `True` sync_body = { "lists": { - "foo-list": { + "wildcard-list": { + "ranges": [[0, 1]], + "required_state": [ + ["*", "*"], + ], + "timeline_limit": 0, + }, + "wildcard-type-remote-state-key-list": { + "ranges": [[0, 1]], + "required_state": [ + ["*", "@some:remote"], + # Not a user ID + ["*", "foobarbaz"], + # Not a user ID + ["*", "foo.bar.baz"], + # Not a user ID + ["*", "@foo"], + ], + "timeline_limit": 0, + }, + "remote-member-list": { + "ranges": [[0, 1]], + "required_state": [ + # Own user ID + [EventTypes.Member, user1_id], + # Remote member + [EventTypes.Member, "@some:remote"], + # Local member + [EventTypes.Member, user2_id], + ], + "timeline_limit": 0, + }, + "lazy-but-remote-member-list": { "ranges": [[0, 1]], "required_state": [ - [EventTypes.Create, ""], # Lazy-load room members [EventTypes.Member, StateValues.LAZY], + # Remote member + [EventTypes.Member, "@some:remote"], ], "timeline_limit": 0, }, @@ -693,15 +1387,302 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): } response_body, _ = self.do_sync(sync_body, tok=user1_tok) - # The list should include both rooms now because we're lazy-loading room members - self.assertListEqual( - list(response_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 1], - "room_ids": [room_id2, room_id1], + # Make sure the list includes room1 but room2 is excluded because it's still + # partially-stated + for list_key in response_body["lists"].keys(): + self.assertIncludes( + set(response_body["lists"][list_key]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, + message=f"Expected only fully-stated rooms to show up for list_key={list_key}. 
Response " + + str(response_body["lists"][list_key]), + ) + + # Take each of the list variants and apply them to room subscriptions to make + # sure the same rules apply + for list_key in sync_body["lists"].keys(): + sync_body_for_subscriptions = { + "room_subscriptions": { + room_id1: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + room_id2: { + "required_state": sync_body["lists"][list_key][ + "required_state" + ], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body_for_subscriptions, tok=user1_tok) + + self.assertIncludes( + set(response_body["rooms"].keys()), + {room_id1}, + exact=True, + message=f"Expected only fully-stated rooms to show up for test_key={list_key}.", + ) + + def test_rooms_required_state_expand(self) -> None: + """Test that when we expand the required state argument we get the + expanded state, and not just the changes to the new expanded.""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, } - ], - response_body["lists"]["foo-list"], + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # We should not see any state changes. + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + def test_rooms_required_state_expand_retract_expand(self) -> None: + """Test that when expanding, retracting and then expanding the required + state, we get the changes that happened.""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. 
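+ # (Passing `name` in the creation content produces an `m.room.name` state event, which the `EventTypes.Name` entries requested later should pick up.)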
+ room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Update the room name + self.helper.send_state( + room_id1, EventTypes.Name, {"name": "Bar"}, state_key="", tok=user1_tok + ) + + # Update the sliding sync requests to exclude the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see the updated room name in state (though it will be in + # the timeline). + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the *new* room name, even though there haven't been any + # changes. + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + def test_rooms_required_state_expand_deduplicate(self) -> None: + """Test that when expanding, retracting and then expanding the required + state, we don't get the state down again if it hasn't changed""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. 
+ self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to exclude the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see any state updates + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see the room name again, as we have already sent that + # down. + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py
index 2e9586ca73..535420209b 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py
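The three `required_state` tests above all pin down the same server contract: when a client expands `required_state` on an existing connection, the current value of each newly requested state key is sent in full, while keys already sent down the connection are not resent unless the underlying event has changed. The following is a minimal sketch of that bookkeeping, with hypothetical names (`state_to_resend`, `already_sent`); it illustrates the contract and is not Synapse's actual implementation:

    from typing import Dict, Set, Tuple

    StateKey = Tuple[str, str]  # (event type, state key)

    def state_to_resend(
        previous_required: Set[StateKey],
        new_required: Set[StateKey],
        already_sent: Dict[StateKey, str],  # last event_id sent per state key
        current_state: Dict[StateKey, str],  # current event_id per state key
    ) -> Set[StateKey]:
        """Pick the state keys whose events should go down this incremental sync."""
        to_send = set()
        # Newly requested keys are candidates even if nothing changed...
        for key in (new_required - previous_required) & current_state.keys():
            # ...but deduplicate anything this connection has already received
            # (the "expand_deduplicate" case exercised above).
            if already_sent.get(key) != current_state[key]:
                to_send.add(key)
        return to_send

For example, expanding from {(m.room.create, "")} to also include (m.room.name, "") yields the name event on the next sync, while retracting and then re-expanding yields nothing unless the name changed in between.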
@@ -14,12 +14,15 @@ import logging from typing import List, Optional +from parameterized import parameterized_class + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin +from synapse.api.constants import EventTypes from synapse.rest.client import login, room, sync from synapse.server import HomeServer -from synapse.types import StreamToken, StrSequence +from synapse.types import StrSequence from synapse.util import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -27,6 +30,20 @@ from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase logger = logging.getLogger(__name__) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): """ Test `rooms.timeline` in the Sliding Sync API. @@ -43,6 +60,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): self.store = hs.get_datastores().main self.storage_controllers = hs.get_storage_controllers() + super().prepare(reactor, clock, hs) + def _assertListEqual( self, actual_items: StrSequence, @@ -130,16 +149,10 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): user2_tok = self.login(user2_id, "pass") room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity1", tok=user2_tok) - self.helper.send(room_id1, "activity2", tok=user2_tok) + event_response1 = self.helper.send(room_id1, "activity1", tok=user2_tok) + event_response2 = self.helper.send(room_id1, "activity2", tok=user2_tok) event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok) - event_pos3 = self.get_success( - self.store.get_position_for_event(event_response3["event_id"]) - ) event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok) - event_pos4 = self.get_success( - self.store.get_position_for_event(event_response4["event_id"]) - ) event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok) user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) @@ -177,27 +190,23 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): ) # Check to make sure the `prev_batch` points at the right place - prev_batch_token = self.get_success( - StreamToken.from_string( - self.store, response_body["rooms"][room_id1]["prev_batch"] - ) - ) - prev_batch_room_stream_token_serialized = self.get_success( - prev_batch_token.room_key.to_string(self.store) + prev_batch_token = response_body["rooms"][room_id1]["prev_batch"] + + # If we use the `prev_batch` token to look backwards we should see + # `event3` and older next. 
+ channel = self.make_request( + "GET", + f"/rooms/{room_id1}/messages?from={prev_batch_token}&dir=b&limit=3", + access_token=user1_tok, ) - # If we use the `prev_batch` token to look backwards, we should see `event3` - # next so make sure the token encompasses it - self.assertEqual( - event_pos3.persisted_after(prev_batch_token.room_key), - False, - f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}", - ) - # If we use the `prev_batch` token to look backwards, we shouldn't see `event4` - # anymore since it was just returned in this response. - self.assertEqual( - event_pos4.persisted_after(prev_batch_token.room_key), - True, - f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}", + self.assertEqual(channel.code, 200, channel.json_body) + self.assertListEqual( + [ + event_response3["event_id"], + event_response2["event_id"], + event_response1["event_id"], + ], + [ev["event_id"] for ev in channel.json_body["chunk"]], ) # With no `from_token` (initial sync), it's all historical since there is no @@ -300,8 +309,8 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): self.assertEqual( response_body["rooms"][room_id1]["limited"], False, - f'Our `timeline_limit` was {sync_body["lists"]["foo-list"]["timeline_limit"]} ' - + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. ' + f"Our `timeline_limit` was {sync_body['lists']['foo-list']['timeline_limit']} " + + f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. " + str(response_body["rooms"][room_id1]), ) # Check to make sure the latest events are returned @@ -378,7 +387,7 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): response_body["rooms"][room_id1]["limited"], True, f"Our `timeline_limit` was {timeline_limit} " - + f'and {len(response_body["rooms"][room_id1]["timeline"])} events were returned in the timeline. ' + + f"and {len(response_body['rooms'][room_id1]['timeline'])} events were returned in the timeline. " + str(response_body["rooms"][room_id1]), ) # Check to make sure that the "live" and historical events are returned @@ -573,3 +582,138 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): # Nothing to see for this banned user in the room in the token range self.assertIsNone(response_body["rooms"].get(room_id1)) + + def test_increasing_timeline_range_sends_more_messages(self) -> None: + """ + Test that increasing the timeline limit via room subscriptions sends the + room down with more messages in a limited sync. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [[EventTypes.Create, ""]], + "timeline_limit": 1, + } + } + } + + message_events = [] + for _ in range(10): + resp = self.helper.send(room_id1, "msg", tok=user1_tok) + message_events.append(resp["event_id"]) + + # Make the first Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + room_response = response_body["rooms"][room_id1] + + self.assertEqual(room_response["initial"], True) + self.assertNotIn("unstable_expanded_timeline", room_response) + self.assertEqual(room_response["limited"], True) + + # We only expect the last message at first + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=message_events[-1:], + message=str(room_response["timeline"]), + ) + + # We also expect to get the create event state. + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + self._assertRequiredStateIncludes( + room_response["required_state"], + {state_map[(EventTypes.Create, "")]}, + exact=True, + ) + + # Now do another request with a room subscription with an increased timeline limit + sync_body["room_subscriptions"] = { + room_id1: { + "required_state": [], + "timeline_limit": 10, + } + } + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + room_response = response_body["rooms"][room_id1] + + self.assertNotIn("initial", room_response) + self.assertEqual(room_response["unstable_expanded_timeline"], True) + self.assertEqual(room_response["limited"], True) + + # Now we expect all the messages + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=message_events, + message=str(room_response["timeline"]), + ) + + # We don't expect to get the room create down, as nothing has changed. 
+ self.assertNotIn("required_state", room_response) + + # Decreasing the timeline limit shouldn't resend any events + sync_body["room_subscriptions"] = { + room_id1: { + "required_state": [], + "timeline_limit": 5, + } + } + + event_response = self.helper.send(room_id1, "msg", tok=user1_tok) + latest_event_id = event_response["event_id"] + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + room_response = response_body["rooms"][room_id1] + + self.assertNotIn("initial", room_response) + self.assertNotIn("unstable_expanded_timeline", room_response) + self.assertEqual(room_response["limited"], False) + + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=[latest_event_id], + message=str(room_response["timeline"]), + ) + + # Increasing the limit to what it was before also should not resend any + # events + sync_body["room_subscriptions"] = { + room_id1: { + "required_state": [], + "timeline_limit": 10, + } + } + + event_response = self.helper.send(room_id1, "msg", tok=user1_tok) + latest_event_id = event_response["event_id"] + + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + room_response = response_body["rooms"][room_id1] + + self.assertNotIn("initial", room_response) + self.assertNotIn("unstable_expanded_timeline", room_response) + self.assertEqual(room_response["limited"], False) + + self._assertTimelineEqual( + room_id=room_id1, + actual_event_ids=[event["event_id"] for event in room_response["timeline"]], + expected_event_ids=[latest_event_id], + message=str(room_response["timeline"]), + ) diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
index cb7638c5ba..dcec5b4cf0 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
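Both sliding sync test files use the same parameterization trick (visible in the hunks above and below): `@parameterized_class` from the `parameterized` package stamps out one copy of the test class per parameter set, and `class_name_func` controls the generated class names so failures read as `..._new` versus `..._fallback`. A self-contained sketch of the pattern:

    from unittest import TestCase

    from parameterized import parameterized_class

    @parameterized_class(
        ("use_new_tables",),
        [
            (True,),
            (False,),
        ],
        class_name_func=lambda cls, num, params_dict: (
            f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}"
        ),
    )
    class ExampleTestCase(TestCase):
        use_new_tables: bool

        def test_flag(self) -> None:
            # Runs twice: once as ExampleTestCase_new (use_new_tables=True) and
            # once as ExampleTestCase_fallback (use_new_tables=False).
            self.assertIsInstance(self.use_new_tables, bool)

In the `SlidingSyncBase` class below, the generated flag is then fed into an `AsyncMock` for `have_finished_sliding_sync_background_jobs`, so the `False` variant exercises the pre-migration fallback path.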
@@ -13,7 +13,9 @@ # import logging from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple +from unittest.mock import AsyncMock +from parameterized import parameterized, parameterized_class from typing_extensions import assert_never from twisted.test.proto_helpers import MemoryReactor @@ -23,10 +25,15 @@ from synapse.api.constants import ( AccountDataTypes, EventContentFields, EventTypes, + JoinRules, + Membership, RoomTypes, ) -from synapse.events import EventBase -from synapse.rest.client import devices, login, receipts, room, sync +from synapse.api.room_versions import RoomVersions +from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict +from synapse.events.snapshot import EventContext +from synapse.handlers.sliding_sync import StateValues +from synapse.rest.client import account_data, devices, login, receipts, room, sync from synapse.server import HomeServer from synapse.types import ( JsonDict, @@ -40,6 +47,7 @@ from synapse.util.stringutils import random_string from tests import unittest from tests.server import TimedOutException +from tests.test_utils.event_injection import create_event logger = logging.getLogger(__name__) @@ -47,8 +55,25 @@ logger = logging.getLogger(__name__) class SlidingSyncBase(unittest.HomeserverTestCase): """Base class for sliding sync test cases""" + # Flag as to whether to use the new sliding sync tables or not + # + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + use_new_tables: bool = True + sync_endpoint = "/_matrix/client/unstable/org.matrix.simplified_msc3575/sync" + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + # https://github.com/element-hq/synapse/issues/17623) + hs.get_datastores().main.have_finished_sliding_sync_background_jobs = AsyncMock( # type: ignore[method-assign] + return_value=self.use_new_tables + ) + def default_config(self) -> JsonDict: config = super().default_config() # Enable sliding sync @@ -122,6 +147,172 @@ class SlidingSyncBase(unittest.HomeserverTestCase): message=str(actual_required_state), ) + def _add_new_dm_to_global_account_data( + self, source_user_id: str, target_user_id: str, target_room_id: str + ) -> None: + """ + Helper to handle inserting a new DM for the source user into global account data + (handles all of the list merging). + + Args: + source_user_id: The user ID of the DM mapping we're going to update + target_user_id: User ID of the person the DM is with + target_room_id: Room ID of the DM + """ + store = self.hs.get_datastores().main + + # Get the current DM map + existing_dm_map = self.get_success( + store.get_global_account_data_by_type_for_user( + source_user_id, AccountDataTypes.DIRECT + ) + ) + # Scrutinize the account data since it has no concrete type. We're just copying + # everything into a known type. It should be a mapping from user ID to a list of + # room IDs. Ignore anything else. 
new_dm_map: Dict[str, List[str]] = {} + if isinstance(existing_dm_map, dict): + for user_id, room_ids in existing_dm_map.items(): + if isinstance(user_id, str) and isinstance(room_ids, list): + for room_id in room_ids: + if isinstance(room_id, str): + new_dm_map[user_id] = new_dm_map.get(user_id, []) + [ + room_id + ] + + # Add the new DM to the map + new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [ + target_room_id + ] + # Save the DM map to global account data + self.get_success( + store.add_account_data_for_user( + source_user_id, + AccountDataTypes.DIRECT, + new_dm_map, + ) + ) + + def _create_dm_room( + self, + inviter_user_id: str, + inviter_tok: str, + invitee_user_id: str, + invitee_tok: str, + should_join_room: bool = True, + ) -> str: + """ + Helper to create a DM room as the "inviter" and invite the "invitee" user to the + room. The "invitee" user will also join the room. The `m.direct` account data + will be set for both users. + """ + # Create a room and send an invite to the other user + room_id = self.helper.create_room_as( + inviter_user_id, + is_public=False, + tok=inviter_tok, + ) + self.helper.invite( + room_id, + src=inviter_user_id, + targ=invitee_user_id, + tok=inviter_tok, + extra_data={"is_direct": True}, + ) + if should_join_room: + # Person that was invited joins the room + self.helper.join(room_id, invitee_user_id, tok=invitee_tok) + + # Mimic the client setting the room as a direct message in the global account + # data for both users. + self._add_new_dm_to_global_account_data( + invitee_user_id, inviter_user_id, room_id + ) + self._add_new_dm_to_global_account_data( + inviter_user_id, invitee_user_id, room_id + ) + + return room_id + + _remote_invite_count: int = 0 + + def _create_remote_invite_room_for_user( + self, + invitee_user_id: str, + unsigned_invite_room_state: Optional[List[StrippedStateEvent]], + invite_room_id: Optional[str] = None, + ) -> str: + """ + Create a fake invite for a remote room and persist it. + + We don't have any state for these kinds of rooms and can only rely on the + stripped state included in the unsigned portion of the invite event to identify + the room. + + Args: + invitee_user_id: The person being invited + unsigned_invite_room_state: List of stripped state events to assist the + receiver in identifying the room. + invite_room_id: Optional remote room ID to be invited to. When unset, we + will generate one.
+ + Returns: + The room ID of the remote invite room + """ + store = self.hs.get_datastores().main + + if invite_room_id is None: + invite_room_id = f"!test_room{self._remote_invite_count}:remote_server" + + invite_event_dict = { + "room_id": invite_room_id, + "sender": "@inviter:remote_server", + "state_key": invitee_user_id, + # Just keep advancing the depth + "depth": self._remote_invite_count, + "origin_server_ts": 1, + "type": EventTypes.Member, + "content": {"membership": Membership.INVITE}, + "auth_events": [], + "prev_events": [], + } + if unsigned_invite_room_state is not None: + serialized_stripped_state_events = [] + for stripped_event in unsigned_invite_room_state: + serialized_stripped_state_events.append( + { + "type": stripped_event.type, + "state_key": stripped_event.state_key, + "sender": stripped_event.sender, + "content": stripped_event.content, + } + ) + + invite_event_dict["unsigned"] = { + "invite_room_state": serialized_stripped_state_events + } + + invite_event = make_event_from_dict( + invite_event_dict, + room_version=RoomVersions.V10, + ) + invite_event.internal_metadata.outlier = True + invite_event.internal_metadata.out_of_band_membership = True + + self.get_success( + store.maybe_store_room_on_outlier_membership( + room_id=invite_room_id, room_version=invite_event.room_version + ) + ) + context = EventContext.for_outlier(self.hs.get_storage_controllers()) + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + self.get_success(persist_controller.persist_event(invite_event, context)) + + self._remote_invite_count += 1 + + return invite_room_id + def _bump_notifier_wait_for_events( self, user_id: str, @@ -203,6 +394,20 @@ class SlidingSyncBase(unittest.HomeserverTestCase): ) +# FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the +# foreground update for +# `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by +# https://github.com/element-hq/synapse/issues/17623) +@parameterized_class( + ("use_new_tables",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'new' if params_dict['use_new_tables'] else 'fallback'}", +) class SlidingSyncTestCase(SlidingSyncBase): """ Tests regarding MSC3575 Sliding Sync `/sync` endpoint. @@ -218,6 +423,7 @@ class SlidingSyncTestCase(SlidingSyncBase): sync.register_servlets, devices.register_servlets, receipts.register_servlets, + account_data.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -225,93 +431,11 @@ class SlidingSyncTestCase(SlidingSyncBase): self.event_sources = hs.get_event_sources() self.storage_controllers = hs.get_storage_controllers() self.account_data_handler = hs.get_account_data_handler() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.persistence = persistence - def _add_new_dm_to_global_account_data( - self, source_user_id: str, target_user_id: str, target_room_id: str - ) -> None: - """ - Helper to handle inserting a new DM for the source user into global account data - (handles all of the list merging). 
- - Args: - source_user_id: The user ID of the DM mapping we're going to update - target_user_id: User ID of the person the DM is with - target_room_id: Room ID of the DM - """ - - # Get the current DM map - existing_dm_map = self.get_success( - self.store.get_global_account_data_by_type_for_user( - source_user_id, AccountDataTypes.DIRECT - ) - ) - # Scrutinize the account data since it has no concrete type. We're just copying - # everything into a known type. It should be a mapping from user ID to a list of - # room IDs. Ignore anything else. - new_dm_map: Dict[str, List[str]] = {} - if isinstance(existing_dm_map, dict): - for user_id, room_ids in existing_dm_map.items(): - if isinstance(user_id, str) and isinstance(room_ids, list): - for room_id in room_ids: - if isinstance(room_id, str): - new_dm_map[user_id] = new_dm_map.get(user_id, []) + [ - room_id - ] - - # Add the new DM to the map - new_dm_map[target_user_id] = new_dm_map.get(target_user_id, []) + [ - target_room_id - ] - # Save the DM map to global account data - self.get_success( - self.store.add_account_data_for_user( - source_user_id, - AccountDataTypes.DIRECT, - new_dm_map, - ) - ) - - def _create_dm_room( - self, - inviter_user_id: str, - inviter_tok: str, - invitee_user_id: str, - invitee_tok: str, - should_join_room: bool = True, - ) -> str: - """ - Helper to create a DM room as the "inviter" and invite the "invitee" user to the - room. The "invitee" user also will join the room. The `m.direct` account data - will be set for both users. - """ - - # Create a room and send an invite the other user - room_id = self.helper.create_room_as( - inviter_user_id, - is_public=False, - tok=inviter_tok, - ) - self.helper.invite( - room_id, - src=inviter_user_id, - targ=invitee_user_id, - tok=inviter_tok, - extra_data={"is_direct": True}, - ) - if should_join_room: - # Person that was invited joins the room - self.helper.join(room_id, invitee_user_id, tok=invitee_tok) - - # Mimic the client setting the room as a direct message in the global account - # data for both users. - self._add_new_dm_to_global_account_data( - invitee_user_id, inviter_user_id, room_id - ) - self._add_new_dm_to_global_account_data( - inviter_user_id, invitee_user_id, room_id - ) - - return room_id + super().prepare(reactor, clock, hs) def test_sync_list(self) -> None: """ @@ -512,288 +636,326 @@ class SlidingSyncTestCase(SlidingSyncBase): # There should be no room sent down. 
self.assertFalse(channel.json_body["rooms"]) - def test_filter_list(self) -> None: + def test_forgotten_up_to_date(self) -> None: """ - Test that filters apply to `lists` + Make sure we get up-to-date `forgotten` status for rooms """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") user2_id = self.register_user("user2", "pass") user2_tok = self.login(user2_id, "pass") - # Create a DM room - joined_dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - should_join_room=True, - ) - invited_dm_room_id = self._create_dm_room( - inviter_user_id=user1_id, - inviter_tok=user1_tok, - invitee_user_id=user2_id, - invitee_tok=user2_tok, - should_join_room=False, - ) - - # Create a normal room room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.join(room_id, user1_id, tok=user1_tok) - # Create a room that user1 is invited to - invite_room_id = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.invite(invite_room_id, src=user2_id, targ=user1_id, tok=user2_tok) + # User1 is banned from the room (was never in the room) + self.helper.ban(room_id, src=user2_id, targ=user1_id, tok=user2_tok) - # Make the Sliding Sync request sync_body = { "lists": { - # Absense of filters does not imply "False" values - "all": { + "foo-list": { "ranges": [[0, 99]], "required_state": [], - "timeline_limit": 1, + "timeline_limit": 0, "filters": {}, }, - # Test single truthy filter - "dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": True}, - }, - # Test single falsy filter - "non-dms": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False}, - }, - # Test how multiple filters should stack (AND'd together) - "room-invites": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": {"is_dm": False, "is_invite": True}, - }, } } - response_body, _ = self.do_sync(sync_body, tok=user1_tok) - - # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["all", "dms", "non-dms", "room-invites"], - response_body["lists"].keys(), + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, ) - # Make sure the lists have the correct rooms - self.assertListEqual( - list(response_body["lists"]["all"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [ - invite_room_id, - room_id, - invited_dm_room_id, - joined_dm_room_id, - ], - } - ], - list(response_body["lists"]["all"]), - ) - self.assertListEqual( - list(response_body["lists"]["dms"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invited_dm_room_id, joined_dm_room_id], - } - ], - list(response_body["lists"]["dms"]), - ) - self.assertListEqual( - list(response_body["lists"]["non-dms"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invite_room_id, room_id], - } - ], - list(response_body["lists"]["non-dms"]), - ) - self.assertListEqual( - list(response_body["lists"]["room-invites"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [invite_room_id], - } - ], - list(response_body["lists"]["room-invites"]), + # User1 forgets the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, 
) + self.assertEqual(channel.code, 200, channel.result) - # Ensure DM's are correctly marked - self.assertDictEqual( - { - room_id: room.get("is_dm") - for room_id, room in response_body["rooms"].items() - }, - { - invite_room_id: None, - room_id: None, - invited_dm_room_id: True, - joined_dm_room_id: True, - }, + # We should no longer see the forgotten room + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + set(), + exact=True, ) - def test_filter_regardless_of_membership_server_left_room(self) -> None: + def test_rejoin_forgotten_room(self) -> None: """ - Test that filters apply to rooms regardless of membership. We're also - compounding the problem by having all of the local users leave the room causing - our server to leave the room. - - We want to make sure that if someone is filtering rooms, and leaves, you still - get that final update down sync that you left. + Make sure we can see a forgotten room again if we rejoin (or any new membership + like an invite) (no longer forgotten) """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") user2_id = self.register_user("user2", "pass") user2_tok = self.login(user2_id, "pass") - # Create a normal room - room_id = self.helper.create_room_as(user1_id, tok=user2_tok) + room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) + # User1 joins the room self.helper.join(room_id, user1_id, tok=user1_tok) - # Create an encrypted space room - space_room_id = self.helper.create_room_as( - user2_id, - tok=user2_tok, - extra_content={ - "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} - }, - ) - self.helper.send_state( - space_room_id, - EventTypes.RoomEncryption, - {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, - tok=user2_tok, + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # We should see the room (like normal) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, ) - self.helper.join(space_room_id, user1_id, tok=user1_tok) - # Make an initial Sliding Sync request + # Leave and forget the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + # User1 forgets the room channel = self.make_request( "POST", - self.sync_endpoint, - { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, - }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Re-join the room + self.helper.join(room_id, user1_id, tok=user1_tok) + + # We should see the room again after re-joining + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_invited_to_forgotten_remote_room(self) -> None: + """ + Make sure we can see a forgotten room again if we are invited again + (remote/federated out-of-band memberships) + """ + user1_id = 
self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote room invite (out-of-band membership) + room_id = self._create_remote_invite_room_for_user(user1_id, None) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # We should see the room (like normal) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Leave and forget the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + # User1 forgets the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Get invited to the room again + self._create_remote_invite_room_for_user(user1_id, None, invite_room_id=room_id) + + # We should see the room again after the new invite + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + def test_reject_remote_invite(self) -> None: + """Test that rejecting a remote invite comes down incremental sync""" + + user_id = self.register_user("user1", "pass") + user_tok = self.login(user_id, "pass") + + # Create a remote room invite (out-of-band membership) + room_id = "!room:remote.server" + self._create_remote_invite_room_for_user(user_id, None, room_id) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [(EventTypes.Member, StateValues.ME)], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user_tok) + # We should see the room (like normal) + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + + # Reject the remote room invite + self.helper.leave(room_id, user_id, tok=user_tok) + + # Sync again after rejecting the invite + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user_tok) + + # The fix to add the leave event to incremental sync when rejecting a remote + # invite relies on the new tables to work.
+ if self.use_new_tables: + # We should see the newly_left room + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id}, + exact=True, + ) + # We should see the leave state for the room so clients don't end up with stuck + # invites + self.assertIncludes( { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], + ( + state["type"], + state["state_key"], + state["content"].get("membership"), + ) + for state in response_body["rooms"][room_id]["required_state"] + }, + {(EventTypes.Member, user_id, Membership.LEAVE)}, + exact=True, + ) + + def test_ignored_user_invites_initial_sync(self) -> None: + """ + Make sure we ignore invites if they are from one of the `m.ignored_user_list` on + initial sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room that user1 is already in + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a room that user2 is already in + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to room_id2 + self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok) + + # Sync once before we ignore to make sure the rooms can show up + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + }, + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # room_id2 shows up because we haven't ignored the user yet + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1, room_id2}, + exact=True, ) - # Everyone leaves the encrypted space room - self.helper.leave(space_room_id, user1_id, tok=user1_tok) - self.helper.leave(space_room_id, user2_id, tok=user2_tok) + # User1 ignores user2 + channel = self.make_request( + "PUT", + f"/_matrix/client/v3/user/{user1_id}/account_data/{AccountDataTypes.IGNORED_USER_LIST}", + content={"ignored_users": {user2_id: {}}}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Sync again (initial sync) + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + # The invite for room_id2 should no longer show up because user2 is ignored + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, + ) + + def test_ignored_user_invites_incremental_sync(self) -> None: + """ + Make sure we ignore invites if they are from one of the `m.ignored_user_list` on + incremental sync. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a room that user1 is already in + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + # Create a room that user2 is already in + room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok) - # Make an incremental Sliding Sync request + # User1 ignores user2 channel = self.make_request( - "POST", - self.sync_endpoint + f"?pos={from_token}", - { - "lists": { - "all-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 0, - "filters": {}, - }, - "foo-list": { - "ranges": [[0, 99]], - "required_state": [], - "timeline_limit": 1, - "filters": { - "is_encrypted": True, - "room_types": [RoomTypes.SPACE], - }, - }, - } - }, + "PUT", + f"/_matrix/client/v3/user/{user1_id}/account_data/{AccountDataTypes.IGNORED_USER_LIST}", + content={"ignored_users": {user2_id: {}}}, access_token=user1_tok, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, 200, channel.result) - # Make sure the lists have the correct rooms even though we `newly_left` - self.assertListEqual( - list(channel.json_body["lists"]["all-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id, room_id], - } - ], + # Initial sync + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 0, + }, + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # User1 only has membership in room_id1 at this point + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, ) - self.assertListEqual( - list(channel.json_body["lists"]["foo-list"]["ops"]), - [ - { - "op": "SYNC", - "range": [0, 99], - "room_ids": [space_room_id], - } - ], + + # User1 is invited to room_id2 after the initial sync + self.helper.invite(room_id2, src=user2_id, targ=user1_id, tok=user2_tok) + + # Sync again (incremental sync) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + # The invite for room_id2 doesn't show up because user2 is ignored + self.assertIncludes( + set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]), + {room_id1}, + exact=True, ) def test_sort_list(self) -> None: @@ -812,11 +974,11 @@ class SlidingSyncTestCase(SlidingSyncBase): self.helper.send(room_id1, "activity in room1", tok=user1_tok) self.helper.send(room_id2, "activity in room2", tok=user1_tok) - # Make the Sliding Sync request + # Make the Sliding Sync request where the range includes *some* of the rooms sync_body = { "lists": { "foo-list": { - "ranges": [[0, 99]], + "ranges": [[0, 1]], "required_state": [], "timeline_limit": 1, } @@ -825,25 +987,56 @@ class SlidingSyncTestCase(SlidingSyncBase): response_body, _ = self.do_sync(sync_body, tok=user1_tok) # Make sure it has the foo-list we requested - self.assertListEqual( - list(response_body["lists"].keys()), - ["foo-list"], + self.assertIncludes( response_body["lists"].keys(), + {"foo-list"}, ) - - # Make sure the list is sorted in the way we expect + # Make sure the list is sorted in the way we expect (we only sort when the range + # doesn't include all of the room) self.assertListEqual( list(response_body["lists"]["foo-list"]["ops"]), [ { "op": "SYNC", - "range": [0, 99], - "room_ids": [room_id2, room_id1, room_id3], + "range": [0, 1], + "room_ids": [room_id2, room_id1], 
} ], response_body["lists"]["foo-list"], ) + # Make the Sliding Sync request where the range includes *all* of the rooms + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 99]], + "required_state": [], + "timeline_limit": 1, + } + } + } + response_body, _ = self.do_sync(sync_body, tok=user1_tok) + + # Make sure it has the foo-list we requested + self.assertIncludes( + response_body["lists"].keys(), + {"foo-list"}, + ) + # Since the range includes all of the rooms, we don't sort the list + self.assertEqual( + len(response_body["lists"]["foo-list"]["ops"]), + 1, + response_body["lists"]["foo-list"], + ) + op = response_body["lists"]["foo-list"]["ops"][0] + self.assertEqual(op["op"], "SYNC") + self.assertEqual(op["range"], [0, 99]) + # Note that we don't sort the rooms when the range includes all of the rooms, so + # we just assert that the rooms are included + self.assertIncludes( + set(op["room_ids"]), {room_id1, room_id2, room_id3}, exact=True + ) + def test_sliced_windows(self) -> None: """ Test that the `lists` `ranges` are sliced correctly. Both sides of each range @@ -972,3 +1165,454 @@ class SlidingSyncTestCase(SlidingSyncBase): # Make the Sliding Sync request response_body, _ = self.do_sync(sync_body, tok=user1_tok) self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + def test_state_reset_room_comes_down_incremental_sync(self) -> None: + """Test that a room that we were state reset out of comes down + incremental sync""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as( + user2_id, + is_public=True, + tok=user2_tok, + extra_content={ + "name": "my super room", + }, + ) + + # Create an event for us to point back to for the state reset + event_response = self.helper.send(room_id1, "test", tok=user2_tok) + event_id = event_response["event_id"] + + self.helper.join(room_id1, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + } + } + } + + # Make the Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # Make sure we see room1 + self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True) + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=room_id1, + room_version=self.get_success(self.store.get_room_version_id(room_id1)), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(room_id1)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) + + state_map_at_reset = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Update the state after user1 was state reset out of the room + self.helper.send_state( + room_id1, + EventTypes.Name, + {EventContentFields.ROOM_NAME: 
"my super duper room"}, + tok=user2_tok, + ) + + # Make another Sliding Sync request (incremental) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Expect to see room1 because it is `newly_left` thanks to being state reset out + # of it since the last time we synced. We need to let the client know that + # something happened and that they are no longer in the room. + self.assertIncludes(set(response_body["rooms"].keys()), {room_id1}, exact=True) + # We set `initial=True` to indicate that the client should reset the state they + # have about the room + self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + # They shouldn't see anything past the state reset + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + # We should see all the state events in the room + state_map_at_reset.values(), + exact=True, + ) + # The position where the state reset happened + self.assertEqual( + response_body["rooms"][room_id1]["bump_stamp"], + join_rule_event_pos.stream, + response_body["rooms"][room_id1], + ) + + # Other non-important things. We just want to check what these are so we know + # what happens in a state reset scenario. + # + # Room name was set at the time of the state reset so we should still be able to + # see it. + self.assertEqual(response_body["rooms"][room_id1]["name"], "my super room") + # Could be set but there is no avatar for this room + self.assertIsNone( + response_body["rooms"][room_id1].get("avatar"), + response_body["rooms"][room_id1], + ) + # Could be set but this room isn't marked as a DM + self.assertIsNone( + response_body["rooms"][room_id1].get("is_dm"), + response_body["rooms"][room_id1], + ) + # Empty timeline because we are not in the room at all (they are all being + # filtered out) + self.assertIsNone( + response_body["rooms"][room_id1].get("timeline"), + response_body["rooms"][room_id1], + ) + # `limited` since we're not providing any timeline events but there are some in + # the room. + self.assertEqual(response_body["rooms"][room_id1]["limited"], True) + # User is no longer in the room so they can't see this info + self.assertIsNone( + response_body["rooms"][room_id1].get("joined_count"), + response_body["rooms"][room_id1], + ) + self.assertIsNone( + response_body["rooms"][room_id1].get("invited_count"), + response_body["rooms"][room_id1], + ) + + def test_state_reset_previously_room_comes_down_incremental_sync_with_filters( + self, + ) -> None: + """ + Test that a room that we were state reset out of should always be sent down + regardless of the filters if it has been sent down the connection before. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + "name": "my super space", + }, + ) + + # Create an event for us to point back to for the state reset + event_response = self.helper.send(space_room_id, "test", tok=user2_tok) + event_id = event_response["event_id"] + + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + } + } + } + + # Make the Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # Make sure we see room1 + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id}, exact=True + ) + self.assertEqual(response_body["rooms"][space_room_id]["initial"], True) + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=space_room_id, + room_version=self.get_success( + self.store.get_room_version_id(space_room_id) + ), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(space_room_id)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) + + state_map_at_reset = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + + # Update the state after user1 was state reset out of the room + self.helper.send_state( + space_room_id, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper space"}, + tok=user2_tok, + ) + + # User2 also leaves the room so the server is no longer participating in the room + # and we don't have access to current state + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make another Sliding Sync request (incremental) + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Expect to see room1 because it is `newly_left` thanks to being state reset out + # of it since the last time we synced. We need to let the client know that + # something happened and that they are no longer in the room. 
+ self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id}, exact=True + ) + # We set `initial=True` to indicate that the client should reset the state they + # have about the room + self.assertEqual(response_body["rooms"][space_room_id]["initial"], True) + # They shouldn't see anything past the state reset + self._assertRequiredStateIncludes( + response_body["rooms"][space_room_id]["required_state"], + # We should see all the state events in the room + state_map_at_reset.values(), + exact=True, + ) + # The position where the state reset happened + self.assertEqual( + response_body["rooms"][space_room_id]["bump_stamp"], + join_rule_event_pos.stream, + response_body["rooms"][space_room_id], + ) + + # Other non-important things. We just want to check what these are so we know + # what happens in a state reset scenario. + # + # Room name was set at the time of the state reset so we should still be able to + # see it. + self.assertEqual( + response_body["rooms"][space_room_id]["name"], "my super space" + ) + # Could be set but there is no avatar for this room + self.assertIsNone( + response_body["rooms"][space_room_id].get("avatar"), + response_body["rooms"][space_room_id], + ) + # Could be set but this room isn't marked as a DM + self.assertIsNone( + response_body["rooms"][space_room_id].get("is_dm"), + response_body["rooms"][space_room_id], + ) + # Empty timeline because we are not in the room at all (they are all being + # filtered out) + self.assertIsNone( + response_body["rooms"][space_room_id].get("timeline"), + response_body["rooms"][space_room_id], + ) + # `limited` since we're not providing any timeline events but there are some in + # the room. + self.assertEqual(response_body["rooms"][space_room_id]["limited"], True) + # User is no longer in the room so they can't see this info + self.assertIsNone( + response_body["rooms"][space_room_id].get("joined_count"), + response_body["rooms"][space_room_id], + ) + self.assertIsNone( + response_body["rooms"][space_room_id].get("invited_count"), + response_body["rooms"][space_room_id], + ) + + @parameterized.expand( + [ + ("server_leaves_room", True), + ("server_participating_in_room", False), + ] + ) + def test_state_reset_never_room_incremental_sync_with_filters( + self, test_description: str, server_leaves_room: bool + ) -> None: + """ + Test that a room that we were state reset out of should be sent down if we can + figure out the state or if it was sent down the connection before. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create a space room + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + "name": "my super space", + }, + ) + + # Create another space room + space_room_id2 = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}, + }, + ) + + # Create an event for us to point back to for the state reset + event_response = self.helper.send(space_room_id, "test", tok=user2_tok) + event_id = event_response["event_id"] + + # User1 joins the rooms + # + self.helper.join(space_room_id, user1_id, tok=user1_tok) + # Join space_room_id2 so that it is at the top of the list + self.helper.join(space_room_id2, user1_id, tok=user1_tok) + + # Make a SS request for only the top room. 
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 0]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + } + } + } + + # Make the Sliding Sync request + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + # Make sure we only see space_room_id2 + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id2}, exact=True + ) + self.assertEqual(response_body["rooms"][space_room_id2]["initial"], True) + + # Just create some activity in space_room_id2 so it appears when we incremental sync again + self.helper.send(space_room_id2, "test", tok=user2_tok) + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=space_room_id, + room_version=self.get_success( + self.store.get_room_version_id(space_room_id) + ), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + # Ensure that the state reset worked and only user2 is in the room now + users_in_room = self.get_success(self.store.get_users_in_room(space_room_id)) + self.assertIncludes(set(users_in_room), {user2_id}, exact=True) + + # Update the state after user1 was state reset out of the room. + # This will also bump it to the top of the list. + self.helper.send_state( + space_room_id, + EventTypes.Name, + {EventContentFields.ROOM_NAME: "my super duper space"}, + tok=user2_tok, + ) + + if server_leaves_room: + # User2 also leaves the room so the server is no longer participating in the room + # and we don't have access to current state + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + + # Make another Sliding Sync request (incremental) + sync_body = { + "lists": { + "foo-list": { + # Expand the range to include all rooms + "ranges": [[0, 1]], + "required_state": [ + # Request all state just to see what we get back when we are + # state reset out of the room + [StateValues.WILDCARD, StateValues.WILDCARD] + ], + "timeline_limit": 1, + "filters": { + "room_types": [RoomTypes.SPACE], + }, + } + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + if self.use_new_tables: + if server_leaves_room: + # We still only expect to see space_room_id2 because even though we were state + # reset out of space_room_id, it was never sent down the connection before so we + # don't need to bother the client with it. + self.assertIncludes( + set(response_body["rooms"].keys()), {space_room_id2}, exact=True + ) + else: + # Both rooms show up because we can figure out the state for the + # `filters.room_types` if someone is still in the room (we look at the + # current state because `room_type` never changes). + self.assertIncludes( + set(response_body["rooms"].keys()), + {space_room_id, space_room_id2}, + exact=True, + ) + else: + # Both rooms show up because we can actually take the time to figure out the + # state for the `filters.room_types` in the fallback path (we look at + # historical state for `LEAVE` membership). + self.assertIncludes( + set(response_body["rooms"].keys()), + {space_room_id, space_room_id2}, + exact=True, + ) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index a85ea994de..33611e8a8c 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py
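The state-reset tests above converge on one rule for incremental sync: a room the user was state reset out of must be sent down (marked `initial` and `limited`, with the state as of the reset) whenever the connection has seen it before, and otherwise only when the server can still evaluate the list filters for it. A rough sketch of that decision, using hypothetical names rather than Synapse's real internals:

    from typing import Optional

    def send_state_reset_room_down_sync(
        previously_sent_down_connection: bool,
        filter_result: Optional[bool],  # None when the filter can't be evaluated
    ) -> bool:
        """Decide whether a state-reset room belongs in this incremental sync."""
        if previously_sent_down_connection:
            # The client already knows about the room, so it must be told that
            # something happened (the room is effectively `newly_left`).
            return True
        if filter_result:
            # We could still work out e.g. `filters.room_types` for the room.
            return True
        # Never sent down and unfilterable: don't bother the client.
        return False

As the comments in the tests above note, on the new tables `filters.room_types` can only be evaluated while someone is still joined to the room, whereas the fallback path can also consult historical state for `LEAVE` memberships, which is why the parameterized variants diverge.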
@@ -36,7 +36,6 @@ from synapse.api.errors import Codes, HttpResponseException from synapse.appservice import ApplicationService from synapse.rest import admin from synapse.rest.client import account, login, register, room -from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource from synapse.server import HomeServer from synapse.storage._base import db_to_json from synapse.types import JsonDict, UserID @@ -47,430 +46,404 @@ from tests.server import FakeSite, make_request from tests.unittest import override_config -class PasswordResetTestCase(unittest.HomeserverTestCase): - servlets = [ - account.register_servlets, - synapse.rest.admin.register_servlets_for_client_rest_resource, - register.register_servlets, - login.register_servlets, - ] - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() - - # Email config. - config["email"] = { - "enable_notifs": False, - "template_dir": os.path.abspath( - pkg_resources.resource_filename("synapse", "res/templates") - ), - "smtp_host": "127.0.0.1", - "smtp_port": 20, - "require_transport_security": False, - "smtp_user": None, - "smtp_pass": None, - "notif_from": "test@example.com", - } - config["public_baseurl"] = "https://example.com" - - hs = self.setup_test_homeserver(config=config) - - async def sendmail( - reactor: IReactorTCP, - smtphost: str, - smtpport: int, - from_addr: str, - to_addr: str, - msg_bytes: bytes, - *args: Any, - **kwargs: Any, - ) -> None: - self.email_attempts.append(msg_bytes) - - self.email_attempts: List[bytes] = [] - hs.get_send_email_handler()._sendmail = sendmail - - return hs - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - self.submit_token_resource = PasswordResetSubmitTokenResource(hs) - - def attempt_wrong_password_login(self, username: str, password: str) -> None: - """Attempts to login as the user with the given password, asserting - that the attempt *fails*. - """ - body = {"type": "m.login.password", "user": username, "password": password} - - channel = self.make_request("POST", "/_matrix/client/r0/login", body) - self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) - - def test_basic_password_reset(self) -> None: - """Test basic password reset flow""" - old_password = "monkey" - new_password = "kangeroo" - - user_id = self.register_user("kermit", old_password) - self.login("kermit", old_password) - - email = "test@example.com" - - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address=email, - validated_at=0, - added_at=0, - ) - ) - - client_secret = "foobar" - session_id = self._request_token(email, client_secret) - - self.assertEqual(len(self.email_attempts), 1) - link = self._get_link_from_email() - - self._validate_token(link) - - self._reset_password(new_password, session_id, client_secret) - - # Assert we can log in with the new password - self.login("kermit", new_password) - - # Assert we can't log in with the old password - self.attempt_wrong_password_login("kermit", old_password) - - # Check that the UI Auth information doesn't store the password in the database. - # - # Note that we don't have the UI Auth session ID, so just pull out the single - # row. 
- result = self.get_success( - self.store.db_pool.simple_select_one_onecol( - "ui_auth_sessions", keyvalues={}, retcol="clientdict" - ) - ) - client_dict = db_to_json(result) - self.assertNotIn("new_password", client_dict) - - @override_config({"rc_3pid_validation": {"burst_count": 3}}) - def test_ratelimit_by_email(self) -> None: - """Test that we ratelimit /requestToken for the same email.""" - old_password = "monkey" - new_password = "kangeroo" - - user_id = self.register_user("kermit", old_password) - self.login("kermit", old_password) - - email = "test1@example.com" - - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address=email, - validated_at=0, - added_at=0, - ) - ) - - def reset(ip: str) -> None: - client_secret = "foobar" - session_id = self._request_token(email, client_secret, ip) - - self.assertEqual(len(self.email_attempts), 1) - link = self._get_link_from_email() - - self._validate_token(link) - - self._reset_password(new_password, session_id, client_secret) - - self.email_attempts.clear() - - # We expect to be able to make three requests before getting rate - # limited. - # - # We change IPs to ensure that we're not being ratelimited due to the - # same IP - reset("127.0.0.1") - reset("127.0.0.2") - reset("127.0.0.3") - - with self.assertRaises(HttpResponseException) as cm: - reset("127.0.0.4") - - self.assertEqual(cm.exception.code, 429) - - def test_basic_password_reset_canonicalise_email(self) -> None: - """Test basic password reset flow - Request password reset with different spelling - """ - old_password = "monkey" - new_password = "kangeroo" - - user_id = self.register_user("kermit", old_password) - self.login("kermit", old_password) - - email_profile = "test@example.com" - email_passwort_reset = "TEST@EXAMPLE.COM" - - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address=email_profile, - validated_at=0, - added_at=0, - ) - ) - - client_secret = "foobar" - session_id = self._request_token(email_passwort_reset, client_secret) - - self.assertEqual(len(self.email_attempts), 1) - link = self._get_link_from_email() - - self._validate_token(link) - - self._reset_password(new_password, session_id, client_secret) - - # Assert we can log in with the new password - self.login("kermit", new_password) - - # Assert we can't log in with the old password - self.attempt_wrong_password_login("kermit", old_password) - - def test_cant_reset_password_without_clicking_link(self) -> None: - """Test that we do actually need to click the link in the email""" - old_password = "monkey" - new_password = "kangeroo" - - user_id = self.register_user("kermit", old_password) - self.login("kermit", old_password) - - email = "test@example.com" +# class PasswordResetTestCase(unittest.HomeserverTestCase): +# servlets = [ +# account.register_servlets, +# synapse.rest.admin.register_servlets_for_client_rest_resource, +# register.register_servlets, +# login.register_servlets, +# ] + +# def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: +# config = self.default_config() + +# # Email config. 
+# config["email"] = { +# "enable_notifs": False, +# "template_dir": os.path.abspath( +# pkg_resources.resource_filename("synapse", "res/templates") +# ), +# "smtp_host": "127.0.0.1", +# "smtp_port": 20, +# "require_transport_security": False, +# "smtp_user": None, +# "smtp_pass": None, +# "notif_from": "test@example.com", +# } +# config["public_baseurl"] = "https://example.com" + +# hs = self.setup_test_homeserver(config=config) - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address=email, - validated_at=0, - added_at=0, - ) - ) - - client_secret = "foobar" - session_id = self._request_token(email, client_secret) - - self.assertEqual(len(self.email_attempts), 1) - - # Attempt to reset password without clicking the link - self._reset_password(new_password, session_id, client_secret, expected_code=401) - - # Assert we can log in with the old password - self.login("kermit", old_password) - - # Assert we can't log in with the new password - self.attempt_wrong_password_login("kermit", new_password) - - def test_no_valid_token(self) -> None: - """Test that we do actually need to request a token and can't just - make a session up. - """ - old_password = "monkey" - new_password = "kangeroo" - - user_id = self.register_user("kermit", old_password) - self.login("kermit", old_password) - - email = "test@example.com" - - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address=email, - validated_at=0, - added_at=0, - ) - ) - - client_secret = "foobar" - session_id = "weasle" +# async def sendmail( +# reactor: IReactorTCP, +# smtphost: str, +# smtpport: int, +# from_addr: str, +# to_addr: str, +# msg_bytes: bytes, +# *args: Any, +# **kwargs: Any, +# ) -> None: +# self.email_attempts.append(msg_bytes) - # Attempt to reset password without even requesting an email - self._reset_password(new_password, session_id, client_secret, expected_code=401) +# self.email_attempts: List[bytes] = [] + +# return hs - # Assert we can log in with the old password - self.login("kermit", old_password) +# def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: +# self.store = hs.get_datastores().main + +# def attempt_wrong_password_login(self, username: str, password: str) -> None: +# """Attempts to login as the user with the given password, asserting +# that the attempt *fails*. +# """ +# body = {"type": "m.login.password", "user": username, "password": password} + +# channel = self.make_request("POST", "/_matrix/client/r0/login", body) +# self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) + +# def test_basic_password_reset(self) -> None: +# """Test basic password reset flow""" +# old_password = "monkey" +# new_password = "kangeroo" + +# user_id = self.register_user("kermit", old_password) +# self.login("kermit", old_password) - # Assert we can't log in with the new password - self.attempt_wrong_password_login("kermit", new_password) - @unittest.override_config({"request_token_inhibit_3pid_errors": True}) - def test_password_reset_bad_email_inhibit_error(self) -> None: - """Test that triggering a password reset with an email address that isn't bound - to an account doesn't leak the lack of binding for that address if configured - that way. 
- """ - self.register_user("kermit", "monkey") - self.login("kermit", "monkey") +# client_secret = "foobar" +# session_id = self._request_token(email, client_secret) - email = "test@example.com" +# self.assertEqual(len(self.email_attempts), 1) +# link = self._get_link_from_email() - client_secret = "foobar" - session_id = self._request_token(email, client_secret) +# self._validate_token(link) - self.assertIsNotNone(session_id) +# self._reset_password(new_password, session_id, client_secret) - def test_password_reset_redirection(self) -> None: - """Test basic password reset flow""" - old_password = "monkey" +# # Assert we can log in with the new password +# self.login("kermit", new_password) - user_id = self.register_user("kermit", old_password) - self.login("kermit", old_password) +# # Assert we can't log in with the old password +# self.attempt_wrong_password_login("kermit", old_password) - email = "test@example.com" +# # Check that the UI Auth information doesn't store the password in the database. +# # +# # Note that we don't have the UI Auth session ID, so just pull out the single +# # row. +# result = self.get_success( +# self.store.db_pool.simple_select_one_onecol( +# "ui_auth_sessions", keyvalues={}, retcol="clientdict" +# ) +# ) +# client_dict = db_to_json(result) +# self.assertNotIn("new_password", client_dict) + +# @override_config({"rc_3pid_validation": {"burst_count": 3}}) +# def test_ratelimit_by_email(self) -> None: +# """Test that we ratelimit /requestToken for the same email.""" +# old_password = "monkey" +# new_password = "kangeroo" - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address=email, - validated_at=0, - added_at=0, - ) - ) +# user_id = self.register_user("kermit", old_password) +# self.login("kermit", old_password) + + +# def reset(ip: str) -> None: +# client_secret = "foobar" +# session_id = self._request_token(email, client_secret, ip) - client_secret = "foobar" - next_link = "http://example.com" - self._request_token(email, client_secret, "127.0.0.1", next_link) +# self.assertEqual(len(self.email_attempts), 1) +# link = self._get_link_from_email() + +# self._validate_token(link) + +# self._reset_password(new_password, session_id, client_secret) + +# self.email_attempts.clear() - self.assertEqual(len(self.email_attempts), 1) - link = self._get_link_from_email() +# # We expect to be able to make three requests before getting rate +# # limited. 
+# # +# # We change IPs to ensure that we're not being ratelimited due to the +# # same IP +# reset("127.0.0.1") +# reset("127.0.0.2") +# reset("127.0.0.3") - self._validate_token(link, next_link) +# with self.assertRaises(HttpResponseException) as cm: +# reset("127.0.0.4") - def _request_token( - self, - email: str, - client_secret: str, - ip: str = "127.0.0.1", - next_link: Optional[str] = None, - ) -> str: - body = {"client_secret": client_secret, "email": email, "send_attempt": 1} - if next_link is not None: - body["next_link"] = next_link - channel = self.make_request( - "POST", - b"account/password/email/requestToken", - body, - client_ip=ip, - ) +# self.assertEqual(cm.exception.code, 429) - if channel.code != 200: - raise HttpResponseException( - channel.code, - channel.result["reason"], - channel.result["body"], - ) +# def test_basic_password_reset_canonicalise_email(self) -> None: +# """Test basic password reset flow +# Request password reset with different spelling +# """ +# old_password = "monkey" +# new_password = "kangeroo" - return channel.json_body["sid"] +# user_id = self.register_user("kermit", old_password) +# self.login("kermit", old_password) - def _validate_token(self, link: str, next_link: Optional[str] = None) -> None: - # Remove the host - path = link.replace("https://example.com", "") +# email_profile = "test@example.com" +# email_passwort_reset = "TEST@EXAMPLE.COM" - # Load the password reset confirmation page - channel = make_request( - self.reactor, - FakeSite(self.submit_token_resource, self.reactor), - "GET", - path, - shorthand=False, - ) +# # Add a threepid +# self.get_success( +# self.store.user_add_threepid( +# user_id=user_id, +# medium="email", +# address=email_profile, +# validated_at=0, +# added_at=0, +# ) +# ) - self.assertEqual(HTTPStatus.OK, channel.code, channel.result) +# client_secret = "foobar" +# session_id = self._request_token(email_passwort_reset, client_secret) - # Now POST to the same endpoint, mimicking the same behaviour as clicking the - # password reset confirm button +# self.assertEqual(len(self.email_attempts), 1) +# link = self._get_link_from_email() - # Confirm the password reset - channel = make_request( - self.reactor, - FakeSite(self.submit_token_resource, self.reactor), - "POST", - path, - content=b"", - shorthand=False, - content_is_form=True, - ) - self.assertEqual( - HTTPStatus.OK if next_link is None else HTTPStatus.FOUND, - channel.code, - channel.result, - ) - - def _get_link_from_email(self) -> str: - assert self.email_attempts, "No emails have been sent" +# self._validate_token(link) - raw_msg = self.email_attempts[-1].decode("UTF-8") - mail = Parser().parsestr(raw_msg) +# self._reset_password(new_password, session_id, client_secret) - text = None - for part in mail.walk(): - if part.get_content_type() == "text/plain": - text = part.get_payload(decode=True) - if text is not None: - # According to the logic table in `get_payload`, we know that - # the result of `get_payload` will be `bytes`, but mypy doesn't - # know this and complains. Thus, we assert the type. 
- assert isinstance(text, bytes) - text = text.decode("UTF-8") +# # Assert we can log in with the new password +# self.login("kermit", new_password) - break +# # Assert we can't log in with the old password +# self.attempt_wrong_password_login("kermit", old_password) - if not text: - self.fail("Could not find text portion of email to parse") +# def test_cant_reset_password_without_clicking_link(self) -> None: +# """Test that we do actually need to click the link in the email""" +# old_password = "monkey" +# new_password = "kangeroo" + +# user_id = self.register_user("kermit", old_password) +# self.login("kermit", old_password) + +# email = "test@example.com" + +# # Add a threepid +# self.get_success( +# self.store.user_add_threepid( +# user_id=user_id, +# medium="email", +# address=email, +# validated_at=0, +# added_at=0, +# ) +# ) - # `text` must be a `str`, after being decoded and determined just above - # to not be `None` or an empty `str`. - assert isinstance(text, str) - - match = re.search(r"https://example.com\S+", text) - assert match, "Could not find link in email" - - return match.group(0) - - def _reset_password( - self, - new_password: str, - session_id: str, - client_secret: str, - expected_code: int = HTTPStatus.OK, - ) -> None: - channel = self.make_request( - "POST", - b"account/password", - { - "new_password": new_password, - "auth": { - "type": LoginType.EMAIL_IDENTITY, - "threepid_creds": { - "client_secret": client_secret, - "sid": session_id, - }, - }, - }, - ) - self.assertEqual(expected_code, channel.code, channel.result) +# client_secret = "foobar" +# session_id = self._request_token(email, client_secret) + +# self.assertEqual(len(self.email_attempts), 1) + +# # Attempt to reset password without clicking the link +# self._reset_password(new_password, session_id, client_secret, expected_code=401) + +# # Assert we can log in with the old password +# self.login("kermit", old_password) + +# # Assert we can't log in with the new password +# self.attempt_wrong_password_login("kermit", new_password) + +# def test_no_valid_token(self) -> None: +# """Test that we do actually need to request a token and can't just +# make a session up. +# """ +# old_password = "monkey" +# new_password = "kangeroo" + +# user_id = self.register_user("kermit", old_password) +# self.login("kermit", old_password) + +# email = "test@example.com" + +# # Add a threepid +# self.get_success( +# self.store.user_add_threepid( +# user_id=user_id, +# medium="email", +# address=email, +# validated_at=0, +# added_at=0, +# ) +# ) + +# client_secret = "foobar" +# session_id = "weasle" + +# # Attempt to reset password without even requesting an email +# self._reset_password(new_password, session_id, client_secret, expected_code=401) + +# # Assert we can log in with the old password +# self.login("kermit", old_password) + +# # Assert we can't log in with the new password +# self.attempt_wrong_password_login("kermit", new_password) + +# @unittest.override_config({"request_token_inhibit_3pid_errors": True}) +# def test_password_reset_bad_email_inhibit_error(self) -> None: +# """Test that triggering a password reset with an email address that isn't bound +# to an account doesn't leak the lack of binding for that address if configured +# that way. 
+# """ +# self.register_user("kermit", "monkey") +# self.login("kermit", "monkey") + +# email = "test@example.com" + +# client_secret = "foobar" +# session_id = self._request_token(email, client_secret) + +# self.assertIsNotNone(session_id) + +# def test_password_reset_redirection(self) -> None: +# """Test basic password reset flow""" +# old_password = "monkey" + +# user_id = self.register_user("kermit", old_password) +# self.login("kermit", old_password) + +# email = "test@example.com" + +# # Add a threepid +# self.get_success( +# self.store.user_add_threepid( +# user_id=user_id, +# medium="email", +# address=email, +# validated_at=0, +# added_at=0, +# ) +# ) + +# client_secret = "foobar" +# next_link = "http://example.com" +# self._request_token(email, client_secret, "127.0.0.1", next_link) + +# self.assertEqual(len(self.email_attempts), 1) +# link = self._get_link_from_email() + +# self._validate_token(link, next_link) + +# def _request_token( +# self, +# email: str, +# client_secret: str, +# ip: str = "127.0.0.1", +# next_link: Optional[str] = None, +# ) -> str: +# body = {"client_secret": client_secret, "email": email, "send_attempt": 1} +# if next_link is not None: +# body["next_link"] = next_link +# channel = self.make_request( +# "POST", +# b"account/password/email/requestToken", +# body, +# client_ip=ip, +# ) + +# if channel.code != 200: +# raise HttpResponseException( +# channel.code, +# channel.result["reason"], +# channel.result["body"], +# ) + +# return channel.json_body["sid"] + +# def _validate_token(self, link: str, next_link: Optional[str] = None) -> None: +# # Remove the host +# path = link.replace("https://example.com", "") + +# # Load the password reset confirmation page +# channel = make_request( +# self.reactor, +# FakeSite(self.submit_token_resource, self.reactor), +# "GET", +# path, +# shorthand=False, +# ) + +# self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + +# # Now POST to the same endpoint, mimicking the same behaviour as clicking the +# # password reset confirm button + +# # Confirm the password reset +# channel = make_request( +# self.reactor, +# FakeSite(self.submit_token_resource, self.reactor), +# "POST", +# path, +# content=b"", +# shorthand=False, +# content_is_form=True, +# ) +# self.assertEqual( +# HTTPStatus.OK if next_link is None else HTTPStatus.FOUND, +# channel.code, +# channel.result, +# ) + +# def _get_link_from_email(self) -> str: +# assert self.email_attempts, "No emails have been sent" + +# raw_msg = self.email_attempts[-1].decode("UTF-8") +# mail = Parser().parsestr(raw_msg) + +# text = None +# for part in mail.walk(): +# if part.get_content_type() == "text/plain": +# text = part.get_payload(decode=True) +# if text is not None: +# # According to the logic table in `get_payload`, we know that +# # the result of `get_payload` will be `bytes`, but mypy doesn't +# # know this and complains. Thus, we assert the type. +# assert isinstance(text, bytes) +# text = text.decode("UTF-8") + +# break + +# if not text: +# self.fail("Could not find text portion of email to parse") + +# # `text` must be a `str`, after being decoded and determined just above +# # to not be `None` or an empty `str`. 
+# assert isinstance(text, str) + +# match = re.search(r"https://example.com\S+", text) +# assert match, "Could not find link in email" + +# return match.group(0) + +# def _reset_password( +# self, +# new_password: str, +# session_id: str, +# client_secret: str, +# expected_code: int = HTTPStatus.OK, +# ) -> None: +# channel = self.make_request( +# "POST", +# b"account/password", +# { +# "new_password": new_password, +# "auth": { +# "type": LoginType.EMAIL_IDENTITY, +# "threepid_creds": { +# "client_secret": client_secret, +# "sid": session_id, +# }, +# }, +# }, +# ) +# self.assertEqual(expected_code, channel.code, channel.result) class DeactivateTestCase(unittest.HomeserverTestCase): @@ -787,503 +760,6 @@ class WhoamiTestCase(unittest.HomeserverTestCase): return channel.json_body -class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): - servlets = [ - account.register_servlets, - login.register_servlets, - synapse.rest.admin.register_servlets_for_client_rest_resource, - ] - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() - - # Email config. - config["email"] = { - "enable_notifs": False, - "template_dir": os.path.abspath( - pkg_resources.resource_filename("synapse", "res/templates") - ), - "smtp_host": "127.0.0.1", - "smtp_port": 20, - "require_transport_security": False, - "smtp_user": None, - "smtp_pass": None, - "notif_from": "test@example.com", - } - config["public_baseurl"] = "https://example.com" - - self.hs = self.setup_test_homeserver(config=config) - - async def sendmail( - reactor: IReactorTCP, - smtphost: str, - smtpport: int, - from_addr: str, - to_addr: str, - msg_bytes: bytes, - *args: Any, - **kwargs: Any, - ) -> None: - self.email_attempts.append(msg_bytes) - - self.email_attempts: List[bytes] = [] - self.hs.get_send_email_handler()._sendmail = sendmail - - return self.hs - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastores().main - - self.user_id = self.register_user("kermit", "test") - self.user_id_tok = self.login("kermit", "test") - self.email = "test@example.com" - self.url_3pid = b"account/3pid" - - def test_add_valid_email(self) -> None: - self._add_email(self.email, self.email) - - def test_add_valid_email_second_time(self) -> None: - self._add_email(self.email, self.email) - self._request_token_invalid_email( - self.email, - expected_errcode=Codes.THREEPID_IN_USE, - expected_error="Email is already in use", - ) - - def test_add_valid_email_second_time_canonicalise(self) -> None: - self._add_email(self.email, self.email) - self._request_token_invalid_email( - "TEST@EXAMPLE.COM", - expected_errcode=Codes.THREEPID_IN_USE, - expected_error="Email is already in use", - ) - - def test_add_email_no_at(self) -> None: - self._request_token_invalid_email( - "address-without-at.bar", - expected_errcode=Codes.BAD_JSON, - expected_error="Unable to parse email address", - ) - - def test_add_email_two_at(self) -> None: - self._request_token_invalid_email( - "foo@foo@test.bar", - expected_errcode=Codes.BAD_JSON, - expected_error="Unable to parse email address", - ) - - def test_add_email_bad_format(self) -> None: - self._request_token_invalid_email( - "user@bad.example.net@good.example.com", - expected_errcode=Codes.BAD_JSON, - expected_error="Unable to parse email address", - ) - - def test_add_email_domain_to_lower(self) -> None: - self._add_email("foo@TEST.BAR", "foo@test.bar") - - def test_add_email_domain_with_umlaut(self) -> None: - 
self._add_email("foo@Öumlaut.com", "foo@öumlaut.com") - - def test_add_email_address_casefold(self) -> None: - self._add_email("Strauß@Example.com", "strauss@example.com") - - def test_address_trim(self) -> None: - self._add_email(" foo@test.bar ", "foo@test.bar") - - @override_config({"rc_3pid_validation": {"burst_count": 3}}) - def test_ratelimit_by_ip(self) -> None: - """Tests that adding emails is ratelimited by IP""" - - # We expect to be able to set three emails before getting ratelimited. - self._add_email("foo1@test.bar", "foo1@test.bar") - self._add_email("foo2@test.bar", "foo2@test.bar") - self._add_email("foo3@test.bar", "foo3@test.bar") - - with self.assertRaises(HttpResponseException) as cm: - self._add_email("foo4@test.bar", "foo4@test.bar") - - self.assertEqual(cm.exception.code, 429) - - def test_add_email_if_disabled(self) -> None: - """Test adding email to profile when doing so is disallowed""" - self.hs.config.registration.enable_3pid_changes = False - - client_secret = "foobar" - channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/email/requestToken", - { - "client_secret": client_secret, - "email": "test@example.com", - "send_attempt": 1, - }, - ) - - self.assertEqual( - HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] - ) - - self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - - def test_delete_email(self) -> None: - """Test deleting an email from profile""" - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=self.user_id, - medium="email", - address=self.email, - validated_at=0, - added_at=0, - ) - ) - - channel = self.make_request( - "POST", - b"account/3pid/delete", - {"medium": "email", "address": self.email}, - access_token=self.user_id_tok, - ) - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - - # Get user - channel = self.make_request( - "GET", - self.url_3pid, - access_token=self.user_id_tok, - ) - - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - def test_delete_email_if_disabled(self) -> None: - """Test deleting an email from profile when disallowed""" - self.hs.config.registration.enable_3pid_changes = False - - # Add a threepid - self.get_success( - self.store.user_add_threepid( - user_id=self.user_id, - medium="email", - address=self.email, - validated_at=0, - added_at=0, - ) - ) - - channel = self.make_request( - "POST", - b"account/3pid/delete", - {"medium": "email", "address": self.email}, - access_token=self.user_id_tok, - ) - - self.assertEqual( - HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] - ) - self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - - # Get user - channel = self.make_request( - "GET", - self.url_3pid, - access_token=self.user_id_tok, - ) - - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) - - def test_cant_add_email_without_clicking_link(self) -> None: - """Test that we do actually need to click the link in the email""" - client_secret = "foobar" - session_id = self._request_token(self.email, client_secret) - - self.assertEqual(len(self.email_attempts), 1) - - # Attempt to add email without clicking the link - channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - 
"sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] - ) - self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) - - # Get user - channel = self.make_request( - "GET", - self.url_3pid, - access_token=self.user_id_tok, - ) - - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - def test_no_valid_token(self) -> None: - """Test that we do actually need to request a token and can't just - make a session up. - """ - client_secret = "foobar" - session_id = "weasle" - - # Attempt to add email without even requesting an email - channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - "sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] - ) - self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) - - # Get user - channel = self.make_request( - "GET", - self.url_3pid, - access_token=self.user_id_tok, - ) - - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - self.assertFalse(channel.json_body["threepids"]) - - @override_config({"next_link_domain_whitelist": None}) - def test_next_link(self) -> None: - """Tests a valid next_link parameter value with no whitelist (good case)""" - self._request_token( - "something@example.com", - "some_secret", - next_link="https://example.com/a/good/site", - expect_code=HTTPStatus.OK, - ) - - @override_config({"next_link_domain_whitelist": None}) - def test_next_link_exotic_protocol(self) -> None: - """Tests using a esoteric protocol as a next_link parameter value. - Someone may be hosting a client on IPFS etc. 
- """ - self._request_token( - "something@example.com", - "some_secret", - next_link="some-protocol://abcdefghijklmopqrstuvwxyz", - expect_code=HTTPStatus.OK, - ) - - @override_config({"next_link_domain_whitelist": None}) - def test_next_link_file_uri(self) -> None: - """Tests next_link parameters cannot be file URI""" - # Attempt to use a next_link value that points to the local disk - self._request_token( - "something@example.com", - "some_secret", - next_link="file:///host/path", - expect_code=HTTPStatus.BAD_REQUEST, - ) - - @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) - def test_next_link_domain_whitelist(self) -> None: - """Tests next_link parameters must fit the whitelist if provided""" - - # Ensure not providing a next_link parameter still works - self._request_token( - "something@example.com", - "some_secret", - next_link=None, - expect_code=HTTPStatus.OK, - ) - - self._request_token( - "something@example.com", - "some_secret", - next_link="https://example.com/some/good/page", - expect_code=HTTPStatus.OK, - ) - - self._request_token( - "something@example.com", - "some_secret", - next_link="https://example.org/some/also/good/page", - expect_code=HTTPStatus.OK, - ) - - self._request_token( - "something@example.com", - "some_secret", - next_link="https://bad.example.org/some/bad/page", - expect_code=HTTPStatus.BAD_REQUEST, - ) - - @override_config({"next_link_domain_whitelist": []}) - def test_empty_next_link_domain_whitelist(self) -> None: - """Tests an empty next_lint_domain_whitelist value, meaning next_link is essentially - disallowed - """ - self._request_token( - "something@example.com", - "some_secret", - next_link="https://example.com/a/page", - expect_code=HTTPStatus.BAD_REQUEST, - ) - - def _request_token( - self, - email: str, - client_secret: str, - next_link: Optional[str] = None, - expect_code: int = HTTPStatus.OK, - ) -> Optional[str]: - """Request a validation token to add an email address to a user's account - - Args: - email: The email address to validate - client_secret: A secret string - next_link: A link to redirect the user to after validation - expect_code: Expected return code of the call - - Returns: - The ID of the new threepid validation session, or None if the response - did not contain a session ID. 
- """ - body = {"client_secret": client_secret, "email": email, "send_attempt": 1} - if next_link: - body["next_link"] = next_link - - channel = self.make_request( - "POST", - b"account/3pid/email/requestToken", - body, - ) - - if channel.code != expect_code: - raise HttpResponseException( - channel.code, - channel.result["reason"], - channel.result["body"], - ) - - return channel.json_body.get("sid") - - def _request_token_invalid_email( - self, - email: str, - expected_errcode: str, - expected_error: str, - client_secret: str = "foobar", - ) -> None: - channel = self.make_request( - "POST", - b"account/3pid/email/requestToken", - {"client_secret": client_secret, "email": email, "send_attempt": 1}, - ) - self.assertEqual( - HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] - ) - self.assertEqual(expected_errcode, channel.json_body["errcode"]) - self.assertIn(expected_error, channel.json_body["error"]) - - def _validate_token(self, link: str) -> None: - # Remove the host - path = link.replace("https://example.com", "") - - channel = self.make_request("GET", path, shorthand=False) - self.assertEqual(HTTPStatus.OK, channel.code, channel.result) - - def _get_link_from_email(self) -> str: - assert self.email_attempts, "No emails have been sent" - - raw_msg = self.email_attempts[-1].decode("UTF-8") - mail = Parser().parsestr(raw_msg) - - text = None - for part in mail.walk(): - if part.get_content_type() == "text/plain": - text = part.get_payload(decode=True) - if text is not None: - # According to the logic table in `get_payload`, we know that - # the result of `get_payload` will be `bytes`, but mypy doesn't - # know this and complains. Thus, we assert the type. - assert isinstance(text, bytes) - text = text.decode("UTF-8") - - break - - if not text: - self.fail("Could not find text portion of email to parse") - - # `text` must be a `str`, after being decoded and determined just above - # to not be `None` or an empty `str`. - assert isinstance(text, str) - - match = re.search(r"https://example.com\S+", text) - assert match, "Could not find link in email" - - return match.group(0) - - def _add_email(self, request_email: str, expected_email: str) -> None: - """Test adding an email to profile""" - previous_email_attempts = len(self.email_attempts) - - client_secret = "foobar" - session_id = self._request_token(request_email, client_secret) - - self.assertEqual(len(self.email_attempts) - previous_email_attempts, 1) - link = self._get_link_from_email() - - self._validate_token(link) - - channel = self.make_request( - "POST", - b"/_matrix/client/unstable/account/3pid/add", - { - "client_secret": client_secret, - "sid": session_id, - "auth": { - "type": "m.login.password", - "user": self.user_id, - "password": "test", - }, - }, - access_token=self.user_id_tok, - ) - - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - - # Get user - channel = self.make_request( - "GET", - self.url_3pid, - access_token=self.user_id_tok, - ) - - self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) - self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) - - threepids = {threepid["address"] for threepid in channel.json_body["threepids"]} - self.assertIn(expected_email, threepids) - - class AccountStatusTestCase(unittest.HomeserverTestCase): servlets = [ account.register_servlets, diff --git a/tests/rest/client/test_auth_issuer.py b/tests/rest/client/test_auth_issuer.py deleted file mode 100644
index 964baeec32..0000000000
--- a/tests/rest/client/test_auth_issuer.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2023 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from http import HTTPStatus
-
-from synapse.rest.client import auth_issuer
-
-from tests.unittest import HomeserverTestCase, override_config, skip_unless
-from tests.utils import HAS_AUTHLIB
-
-ISSUER = "https://account.example.com/"
-
-
-class AuthIssuerTestCase(HomeserverTestCase):
-    servlets = [
-        auth_issuer.register_servlets,
-    ]
-
-    def test_returns_404_when_msc3861_disabled(self) -> None:
-        # Make an unauthenticated request for the discovery info.
-        channel = self.make_request(
-            "GET",
-            "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
-        )
-        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
-
-    @skip_unless(HAS_AUTHLIB, "requires authlib")
-    @override_config(
-        {
-            "disable_registration": True,
-            "experimental_features": {
-                "msc3861": {
-                    "enabled": True,
-                    "issuer": ISSUER,
-                    "client_id": "David Lister",
-                    "client_auth_method": "client_secret_post",
-                    "client_secret": "Who shot Mister Burns?",
-                }
-            },
-        }
-    )
-    def test_returns_issuer_when_oidc_enabled(self) -> None:
-        # Make an unauthenticated request for the discovery info.
-        channel = self.make_request(
-            "GET",
-            "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
-        )
-        self.assertEqual(channel.code, HTTPStatus.OK)
-        self.assertEqual(channel.json_body, {"issuer": ISSUER})
diff --git a/tests/rest/client/test_auth_metadata.py b/tests/rest/client/test_auth_metadata.py
new file mode 100644
index 0000000000..a935533b09
--- /dev/null
+++ b/tests/rest/client/test_auth_metadata.py
@@ -0,0 +1,140 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright 2023 The Matrix.org Foundation C.I.C
+# Copyright (C) 2023-2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+# Originally licensed under the Apache License, Version 2.0:
+# <http://www.apache.org/licenses/LICENSE-2.0>.
+#
+# [This file includes modifications made by New Vector Limited]
+#
+from http import HTTPStatus
+from unittest.mock import AsyncMock
+
+from synapse.rest.client import auth_metadata
+
+from tests.unittest import HomeserverTestCase, override_config, skip_unless
+from tests.utils import HAS_AUTHLIB
+
+ISSUER = "https://account.example.com/"
+
+
+class AuthIssuerTestCase(HomeserverTestCase):
+    servlets = [
+        auth_metadata.register_servlets,
+    ]
+
+    def test_returns_404_when_msc3861_disabled(self) -> None:
+        # Make an unauthenticated request for the discovery info.
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
+        )
+        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
+
+    @skip_unless(HAS_AUTHLIB, "requires authlib")
+    @override_config(
+        {
+            "disable_registration": True,
+            "experimental_features": {
+                "msc3861": {
+                    "enabled": True,
+                    "issuer": ISSUER,
+                    "client_id": "David Lister",
+                    "client_auth_method": "client_secret_post",
+                    "client_secret": "Who shot Mister Burns?",
+                }
+            },
+        }
+    )
+    def test_returns_issuer_when_oidc_enabled(self) -> None:
+        # Patch the HTTP client to return the issuer metadata
+        req_mock = AsyncMock(return_value={"issuer": ISSUER})
+        self.hs.get_proxied_http_client().get_json = req_mock  # type: ignore[method-assign]
+
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
+        )
+
+        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertEqual(channel.json_body, {"issuer": ISSUER})
+
+        req_mock.assert_called_with(
+            "https://account.example.com/.well-known/openid-configuration"
+        )
+        req_mock.reset_mock()
+
+        # The second call should use the cached value
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/org.matrix.msc2965/auth_issuer",
+        )
+
+        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertEqual(channel.json_body, {"issuer": ISSUER})
+        req_mock.assert_not_called()
+
+
+class AuthMetadataTestCase(HomeserverTestCase):
+    servlets = [
+        auth_metadata.register_servlets,
+    ]
+
+    def test_returns_404_when_msc3861_disabled(self) -> None:
+        # Make an unauthenticated request for the discovery info.
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",
+        )
+        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
+
+    @skip_unless(HAS_AUTHLIB, "requires authlib")
+    @override_config(
+        {
+            "disable_registration": True,
+            "experimental_features": {
+                "msc3861": {
+                    "enabled": True,
+                    "issuer": ISSUER,
+                    "client_id": "David Lister",
+                    "client_auth_method": "client_secret_post",
+                    "client_secret": "Who shot Mister Burns?",
+                }
+            },
+        }
+    )
+    def test_returns_issuer_when_oidc_enabled(self) -> None:
+        # Patch the HTTP client to return the issuer metadata
+        req_mock = AsyncMock(
+            return_value={
+                "issuer": ISSUER,
+                "authorization_endpoint": "https://example.com/auth",
+                "token_endpoint": "https://example.com/token",
+            }
+        )
+        self.hs.get_proxied_http_client().get_json = req_mock  # type: ignore[method-assign]
+
+        channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/org.matrix.msc2965/auth_metadata",
+        )
+
+        self.assertEqual(channel.code, HTTPStatus.OK)
+        self.assertEqual(
+            channel.json_body,
+            {
+                "issuer": ISSUER,
+                "authorization_endpoint": "https://example.com/auth",
+                "token_endpoint": "https://example.com/token",
+            },
+        )
diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py
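The new tests/rest/client/test_auth_metadata.py above reflects that the MSC2965 auth_issuer and auth_metadata endpoints are now backed by a fetch of the upstream OpenID configuration rather than static config, with the result cached between requests. A minimal sketch of the stubbing pattern those tests rely on (hs is the test homeserver; get_proxied_http_client().get_json is the same hook the tests patch):

    from unittest.mock import AsyncMock

    # Stub the outbound fetch so the endpoint under test is deterministic.
    req_mock = AsyncMock(return_value={"issuer": "https://account.example.com/"})
    hs.get_proxied_http_client().get_json = req_mock  # type: ignore[method-assign]

    # The first request triggers exactly one fetch of
    # https://account.example.com/.well-known/openid-configuration;
    # after req_mock.reset_mock(), a second request should be served from
    # the cache, leaving req_mock un-called.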
index bbe8ab1a7c..1cfaf4fbd7 100644
--- a/tests/rest/client/test_capabilities.py
+++ b/tests/rest/client/test_capabilities.py
@@ -118,7 +118,7 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertTrue(capabilities["m.change_password"]["enabled"]) self.assertTrue(capabilities["m.set_displayname"]["enabled"]) self.assertTrue(capabilities["m.set_avatar_url"]["enabled"]) - self.assertTrue(capabilities["m.3pid_changes"]["enabled"]) + self.assertFalse(capabilities["m.3pid_changes"]["enabled"]) @override_config({"enable_set_displayname": False}) def test_get_set_displayname_capabilities_displayname_disabled(self) -> None: @@ -142,56 +142,49 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.OK) self.assertFalse(capabilities["m.set_avatar_url"]["enabled"]) - @override_config({"enable_3pid_changes": False}) - def test_get_change_3pid_capabilities_3pid_disabled(self) -> None: - """Test if change 3pid is disabled that the server responds it.""" + @override_config( + { + "enable_set_displayname": False, + "experimental_features": {"msc4133_enabled": True}, + } + ) + def test_get_set_displayname_capabilities_displayname_disabled_msc4133( + self, + ) -> None: + """Test if set displayname is disabled that the server responds it.""" access_token = self.login(self.localpart, self.password) channel = self.make_request("GET", self.url, access_token=access_token) capabilities = channel.json_body["capabilities"] self.assertEqual(channel.code, HTTPStatus.OK) - self.assertFalse(capabilities["m.3pid_changes"]["enabled"]) - - @override_config({"experimental_features": {"msc3244_enabled": False}}) - def test_get_does_not_include_msc3244_fields_when_disabled(self) -> None: - access_token = self.get_success( - self.auth_handler.create_access_token_for_user_id( - self.user, device_id=None, valid_until_ms=None - ) - ) - - channel = self.make_request("GET", self.url, access_token=access_token) - capabilities = channel.json_body["capabilities"] - - self.assertEqual(channel.code, 200) - self.assertNotIn( - "org.matrix.msc3244.room_capabilities", capabilities["m.room_versions"] + self.assertFalse(capabilities["m.set_displayname"]["enabled"]) + self.assertTrue(capabilities["uk.tcpip.msc4133.profile_fields"]["enabled"]) + self.assertEqual( + capabilities["uk.tcpip.msc4133.profile_fields"]["disallowed"], + ["displayname"], ) - def test_get_does_include_msc3244_fields_when_enabled(self) -> None: - access_token = self.get_success( - self.auth_handler.create_access_token_for_user_id( - self.user, device_id=None, valid_until_ms=None - ) - ) + @override_config( + { + "enable_set_avatar_url": False, + "experimental_features": {"msc4133_enabled": True}, + } + ) + def test_get_set_avatar_url_capabilities_avatar_url_disabled_msc4133(self) -> None: + """Test if set avatar_url is disabled that the server responds it.""" + access_token = self.login(self.localpart, self.password) channel = self.make_request("GET", self.url, access_token=access_token) capabilities = channel.json_body["capabilities"] - self.assertEqual(channel.code, 200) - for details in capabilities["m.room_versions"][ - "org.matrix.msc3244.room_capabilities" - ].values(): - if details["preferred"] is not None: - self.assertTrue( - details["preferred"] in KNOWN_ROOM_VERSIONS, - str(details["preferred"]), - ) - - self.assertGreater(len(details["support"]), 0) - for room_version in details["support"]: - self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, str(room_version)) + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertFalse(capabilities["m.set_avatar_url"]["enabled"]) + 
self.assertTrue(capabilities["uk.tcpip.msc4133.profile_fields"]["enabled"]) + self.assertEqual( + capabilities["uk.tcpip.msc4133.profile_fields"]["disallowed"], + ["avatar_url"], + ) def test_get_get_token_login_fields_when_disabled(self) -> None: """By default login via an existing session is disabled.""" diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py new file mode 100644
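For reference, the updated capability assertions above imply a response shape along these lines when a profile field is locked down and MSC4133 is enabled (the uk.tcpip.msc4133.profile_fields key is unstable; this is illustrative, not exhaustive):

    capabilities = {
        "m.set_displayname": {"enabled": False},
        "uk.tcpip.msc4133.profile_fields": {
            "enabled": True,
            "disallowed": ["displayname"],
        },
    }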
index 0000000000..9f9d241f12
--- /dev/null
+++ b/tests/rest/client/test_delayed_events.py
@@ -0,0 +1,610 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +"""Tests REST events for /delayed_events paths.""" + +from http import HTTPStatus +from typing import List + +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import Codes +from synapse.rest import admin +from synapse.rest.client import delayed_events, login, room, versions +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests import unittest +from tests.unittest import HomeserverTestCase + +PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events" + +_EVENT_TYPE = "com.example.test" + + +class DelayedEventsUnstableSupportTestCase(HomeserverTestCase): + servlets = [versions.register_servlets] + + def test_false_by_default(self) -> None: + channel = self.make_request("GET", "/_matrix/client/versions") + self.assertEqual(channel.code, 200, channel.result) + self.assertFalse(channel.json_body["unstable_features"]["org.matrix.msc4140"]) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_true_if_enabled(self) -> None: + channel = self.make_request("GET", "/_matrix/client/versions") + self.assertEqual(channel.code, 200, channel.result) + self.assertTrue(channel.json_body["unstable_features"]["org.matrix.msc4140"]) + + +class DelayedEventsTestCase(HomeserverTestCase): + """Tests getting and managing delayed events.""" + + servlets = [ + admin.register_servlets, + delayed_events.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + config["max_event_delay_duration"] = "24h" + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user1_user_id = self.register_user("user1", "pass") + self.user1_access_token = self.login("user1", "pass") + self.user2_user_id = self.register_user("user2", "pass") + self.user2_access_token = self.login("user2", "pass") + + self.room_id = self.helper.create_room_as( + self.user1_user_id, + tok=self.user1_access_token, + extra_content={ + "preset": "public_chat", + "power_level_content_override": { + "events": { + _EVENT_TYPE: 0, + } + }, + }, + ) + + self.helper.join( + room=self.room_id, user=self.user2_user_id, tok=self.user2_access_token + ) + + def test_delayed_events_empty_on_startup(self) -> None: + self.assertListEqual([], self._get_delayed_events()) + + def test_delayed_state_events_are_sent_on_timeout(self) -> None: + state_key = "to_send_on_timeout" + + setter_key = "setter" + setter_expected = "on_timeout" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900), + { + setter_key: setter_expected, + }, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + 
self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + self.reactor.advance(1) + self.assertListEqual([], self._get_delayed_events()) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + @unittest.override_config( + {"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}} + ) + def test_get_delayed_events_ratelimit(self) -> None: + args = ("GET", PATH_PREFIX, b"", self.user1_access_token) + + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) + + # Add the current user to the ratelimit overrides, allowing them no ratelimiting. + self.get_success( + self.hs.get_datastores().main.set_ratelimit_for_user( + self.user1_user_id, 0, 0 + ) + ) + + # Test that the request isn't ratelimited anymore. + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + def test_update_delayed_event_without_id(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/", + access_token=self.user1_access_token, + ) + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result) + + def test_update_delayed_event_without_body(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + access_token=self.user1_access_token, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.NOT_JSON, + channel.json_body["errcode"], + ) + + def test_update_delayed_event_without_action(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + {}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.MISSING_PARAM, + channel.json_body["errcode"], + ) + + def test_update_delayed_event_with_invalid_action(self) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + {"action": "oops"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.INVALID_PARAM, + channel.json_body["errcode"], + ) + + @parameterized.expand(["cancel", "restart", "send"]) + def test_update_delayed_event_without_match(self, action: str) -> None: + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/abc", + {"action": action}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, channel.result) + + def test_cancel_delayed_state_event(self) -> None: + state_key = "to_never_send" + + setter_key = "setter" + setter_expected = "none" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 1500), + { + setter_key: setter_expected, + }, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + 
_EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_id}", + {"action": "cancel"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + self.assertListEqual([], self._get_delayed_events()) + + self.reactor.advance(1) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + @unittest.override_config( + {"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}} + ) + def test_cancel_delayed_event_ratelimit(self) -> None: + delay_ids = [] + for _ in range(2): + channel = self.make_request( + "POST", + _get_path_for_delayed_send(self.room_id, _EVENT_TYPE, 100000), + {}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + delay_ids.append(delay_id) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_ids.pop(0)}", + {"action": "cancel"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + args = ( + "POST", + f"{PATH_PREFIX}/{delay_ids.pop(0)}", + {"action": "cancel"}, + self.user1_access_token, + ) + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) + + # Add the current user to the ratelimit overrides, allowing them no ratelimiting. + self.get_success( + self.hs.get_datastores().main.set_ratelimit_for_user( + self.user1_user_id, 0, 0 + ) + ) + + # Test that the request isn't ratelimited anymore. + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + def test_send_delayed_state_event(self) -> None: + state_key = "to_send_on_request" + + setter_key = "setter" + setter_expected = "on_send" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 100000), + { + setter_key: setter_expected, + }, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_id}", + {"action": "send"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + self.assertListEqual([], self._get_delayed_events()) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + @unittest.override_config({"rc_message": {"per_second": 3.5, "burst_count": 4}}) + def test_send_delayed_event_ratelimit(self) -> None: + delay_ids = [] + for _ in range(2): + channel = self.make_request( + "POST", + _get_path_for_delayed_send(self.room_id, _EVENT_TYPE, 100000), + {}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = 
channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + delay_ids.append(delay_id) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_ids.pop(0)}", + {"action": "send"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + args = ( + "POST", + f"{PATH_PREFIX}/{delay_ids.pop(0)}", + {"action": "send"}, + self.user1_access_token, + ) + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) + + # Add the current user to the ratelimit overrides, allowing them no ratelimiting. + self.get_success( + self.hs.get_datastores().main.set_ratelimit_for_user( + self.user1_user_id, 0, 0 + ) + ) + + # Test that the request isn't ratelimited anymore. + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + def test_restart_delayed_state_event(self) -> None: + state_key = "to_send_on_restarted_timeout" + + setter_key = "setter" + setter_expected = "on_timeout" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 1500), + { + setter_key: setter_expected, + }, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_id}", + {"action": "restart"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + self.reactor.advance(1) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + content = self._get_delayed_event_content(events[0]) + self.assertEqual(setter_expected, content.get(setter_key), content) + self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + expect_code=HTTPStatus.NOT_FOUND, + ) + + self.reactor.advance(1) + self.assertListEqual([], self._get_delayed_events()) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + @unittest.override_config( + {"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}} + ) + def test_restart_delayed_event_ratelimit(self) -> None: + delay_ids = [] + for _ in range(2): + channel = self.make_request( + "POST", + _get_path_for_delayed_send(self.room_id, _EVENT_TYPE, 100000), + {}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + delay_id = channel.json_body.get("delay_id") + self.assertIsNotNone(delay_id) + delay_ids.append(delay_id) + + channel = self.make_request( + "POST", + f"{PATH_PREFIX}/{delay_ids.pop(0)}", + {"action": "restart"}, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + args = ( + "POST", + f"{PATH_PREFIX}/{delay_ids.pop(0)}", + {"action": "restart"}, + self.user1_access_token, + ) + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) + + # 
Add the current user to the ratelimit overrides, allowing them no ratelimiting. + self.get_success( + self.hs.get_datastores().main.set_ratelimit_for_user( + self.user1_user_id, 0, 0 + ) + ) + + # Test that the request isn't ratelimited anymore. + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + def test_delayed_state_is_not_cancelled_by_new_state_from_same_user( + self, + ) -> None: + state_key = "to_not_be_cancelled_by_same_user" + + setter_key = "setter" + setter_expected = "on_timeout" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900), + { + setter_key: setter_expected, + }, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + + self.helper.send_state( + self.room_id, + _EVENT_TYPE, + { + setter_key: "manual", + }, + self.user1_access_token, + state_key=state_key, + ) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + + self.reactor.advance(1) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + def test_delayed_state_is_cancelled_by_new_state_from_other_user( + self, + ) -> None: + state_key = "to_be_cancelled_by_other_user" + + setter_key = "setter" + channel = self.make_request( + "PUT", + _get_path_for_delayed_state(self.room_id, _EVENT_TYPE, state_key, 900), + { + setter_key: "on_timeout", + }, + self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + events = self._get_delayed_events() + self.assertEqual(1, len(events), events) + + setter_expected = "other_user" + self.helper.send_state( + self.room_id, + _EVENT_TYPE, + { + setter_key: setter_expected, + }, + self.user2_access_token, + state_key=state_key, + ) + self.assertListEqual([], self._get_delayed_events()) + + self.reactor.advance(1) + content = self.helper.get_state( + self.room_id, + _EVENT_TYPE, + self.user1_access_token, + state_key=state_key, + ) + self.assertEqual(setter_expected, content.get(setter_key), content) + + def _get_delayed_events(self) -> List[JsonDict]: + channel = self.make_request( + "GET", + PATH_PREFIX, + access_token=self.user1_access_token, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + key = "delayed_events" + self.assertIn(key, channel.json_body) + + events = channel.json_body[key] + self.assertIsInstance(events, list) + + return events + + def _get_delayed_event_content(self, event: JsonDict) -> JsonDict: + key = "content" + self.assertIn(key, event) + + content = event[key] + self.assertIsInstance(content, dict) + + return content + + +def _get_path_for_delayed_state( + room_id: str, event_type: str, state_key: str, delay_ms: int +) -> str: + return f"rooms/{room_id}/state/{event_type}/{state_key}?org.matrix.msc4140.delay={delay_ms}" + + +def _get_path_for_delayed_send(room_id: str, event_type: str, delay_ms: int) -> str: + return f"rooms/{room_id}/send/{event_type}?org.matrix.msc4140.delay={delay_ms}" diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py
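The delayed-events tests above exercise the MSC4140 flow end to end: an event is scheduled by appending an `org.matrix.msc4140.delay` query parameter (in milliseconds) to the normal send/state endpoints, and the returned `delay_id` is then driven through the `cancel`, `send`, and `restart` management actions. A minimal client-side sketch, assuming a placeholder homeserver, token, and room ID, and the unstable management prefix these tests mount as `PATH_PREFIX`:

```python
# Minimal sketch of the MSC4140 client flow exercised by the tests above.
# BASE, the token, and the room ID are placeholders; the management path
# assumes the unstable /unstable/org.matrix.msc4140/delayed_events prefix.
import requests

BASE = "https://example.org/_matrix/client"
AUTH = {"Authorization": "Bearer <access_token>"}

# Schedule a state event to be sent in 60s (the delay is in milliseconds).
resp = requests.put(
    f"{BASE}/v3/rooms/!room:example.org/state/com.example.status/my_key",
    params={"org.matrix.msc4140.delay": 60_000},
    headers=AUTH,
    json={"setter": "on_timeout"},
)
delay_id = resp.json()["delay_id"]

# List the pending delayed events, then cancel, send, or restart one by ID.
mgmt = f"{BASE}/unstable/org.matrix.msc4140/delayed_events"
pending = requests.get(mgmt, headers=AUTH).json()["delayed_events"]
requests.post(f"{mgmt}/{delay_id}", headers=AUTH, json={"action": "restart"})
```

Both scheduling and the management actions are rate-limited; the tests confirm that `set_ratelimit_for_user(user_id, 0, 0)` exempts a user from those limits.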
index a3ed12a38f..dd3abdebac 100644
--- a/tests/rest/client/test_devices.py
+++ b/tests/rest/client/test_devices.py
@@ -24,6 +24,7 @@ from twisted.internet.defer import ensureDeferred from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import NotFoundError +from synapse.appservice import ApplicationService from synapse.rest import admin, devices, sync from synapse.rest.client import keys, login, register from synapse.server import HomeServer @@ -455,3 +456,183 @@ class DehydratedDeviceTestCase(unittest.HomeserverTestCase): token, ) self.assertEqual(channel.json_body["device_keys"], {"@mikey:test": {}}) + + +class MSC4190AppserviceDevicesTestCase(unittest.HomeserverTestCase): + servlets = [ + register.register_servlets, + devices.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.hs = self.setup_test_homeserver() + + # This application service uses the new MSC4190 behaviours + self.msc4190_service = ApplicationService( + id="msc4190", + token="some_token", + hs_token="some_token", + sender="@as:example.com", + namespaces={ + ApplicationService.NS_USERS: [{"regex": "@.*", "exclusive": False}] + }, + msc4190_device_management=True, + ) + # This application service doesn't use the new MSC4190 behaviours + self.pre_msc_service = ApplicationService( + id="regular", + token="other_token", + hs_token="other_token", + sender="@as2:example.com", + namespaces={ + ApplicationService.NS_USERS: [{"regex": "@.*", "exclusive": False}] + }, + msc4190_device_management=False, + ) + self.hs.get_datastores().main.services_cache.append(self.msc4190_service) + self.hs.get_datastores().main.services_cache.append(self.pre_msc_service) + return self.hs + + def test_PUT_device(self) -> None: + self.register_appservice_user("alice", self.msc4190_service.token) + self.register_appservice_user("bob", self.pre_msc_service.token) + + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={"display_name": "Alice's device"}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 201, channel.json_body) + + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(len(channel.json_body["devices"]), 1) + self.assertEqual(channel.json_body["devices"][0]["device_id"], "AABBCCDD") + + # Doing a second time should return a 200 instead of a 201 + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={"display_name": "Alice's device"}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # On the regular service, that API should not allow for the + # creation of new devices. 
+ channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@bob:test", + content={"display_name": "Bob's device"}, + access_token=self.pre_msc_service.token, + ) + self.assertEqual(channel.code, 404, channel.json_body) + + def test_DELETE_device(self) -> None: + self.register_appservice_user("alice", self.msc4190_service.token) + + # There should be no device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + # Create a device + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 201, channel.json_body) + + # There should be one device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(len(channel.json_body["devices"]), 1) + + # Delete the device. UIA should not be required. + channel = self.make_request( + "DELETE", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # There should be no device again + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + def test_POST_delete_devices(self) -> None: + self.register_appservice_user("alice", self.msc4190_service.token) + + # There should be no device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + # Create a device + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 201, channel.json_body) + + # There should be one device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(len(channel.json_body["devices"]), 1) + + # Delete the device with delete_devices + # UIA should not be required. + channel = self.make_request( + "POST", + "/_matrix/client/v3/delete_devices?user_id=@alice:test", + content={"devices": ["AABBCCDD"]}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # There should be no device again + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py
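The `MSC4190AppserviceDevicesTestCase` above checks that only an application service registered with `msc4190_device_management=True` may manage devices for users in its namespace: `PUT /devices/{id}` creates a device (201 on first creation, 200 on an idempotent repeat) and `DELETE` succeeds without a user-interactive auth round-trip. A hedged sketch of the calls such an appservice would make, with placeholder homeserver and tokens:

```python
# Sketch of the MSC4190 device-management calls; URL and tokens are
# placeholders standing in for a real appservice deployment.
import requests

BASE = "https://example.org/_matrix/client/v3"
AS_AUTH = {"Authorization": "Bearer <as_token>"}
USER = {"user_id": "@alice:example.org"}

# Create or update a device on behalf of a namespaced user.
r = requests.put(
    f"{BASE}/devices/AABBCCDD",
    params=USER,
    headers=AS_AUTH,
    json={"display_name": "Alice's device"},
)
assert r.status_code in (201, 200)  # 201 created, 200 already existed

# Delete it again -- no UIA challenge is expected for MSC4190 appservices.
requests.delete(f"{BASE}/devices/AABBCCDD", params=USER, headers=AS_AUTH)
```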
index 06f1c1b234..039144fdbe 100644
--- a/tests/rest/client/test_events.py
+++ b/tests/rest/client/test_events.py
@@ -19,7 +19,7 @@
 #
 #
-""" Tests REST events for /events paths."""
+"""Tests REST events for /events paths."""
 
 from unittest.mock import Mock
diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
deleted file mode 100644
index 63c2c5923e..0000000000
--- a/tests/rest/client/test_identity.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# This file is licensed under the Affero General Public License (AGPL) version 3.
-#
-# Copyright (C) 2023 New Vector, Ltd
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# See the GNU Affero General Public License for more details:
-# <https://www.gnu.org/licenses/agpl-3.0.html>.
-#
-# Originally licensed under the Apache License, Version 2.0:
-# <http://www.apache.org/licenses/LICENSE-2.0>.
-#
-# [This file includes modifications made by New Vector Limited]
-#
-#
-
-from http import HTTPStatus
-
-from twisted.test.proto_helpers import MemoryReactor
-
-import synapse.rest.admin
-from synapse.rest.client import login, room
-from synapse.server import HomeServer
-from synapse.util import Clock
-
-from tests import unittest
-
-
-class IdentityTestCase(unittest.HomeserverTestCase):
-    servlets = [
-        synapse.rest.admin.register_servlets_for_client_rest_resource,
-        room.register_servlets,
-        login.register_servlets,
-    ]
-
-    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        config = self.default_config()
-        config["enable_3pid_lookup"] = False
-        self.hs = self.setup_test_homeserver(config=config)
-
-        return self.hs
-
-    def test_3pid_lookup_disabled(self) -> None:
-        self.hs.config.registration.enable_3pid_lookup = False
-
-        self.register_user("kermit", "monkey")
-        tok = self.login("kermit", "monkey")
-
-        channel = self.make_request(b"POST", "/createRoom", b"{}", access_token=tok)
-        self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
-        room_id = channel.json_body["room_id"]
-
-        request_data = {
-            "id_server": "testis",
-            "medium": "email",
-            "address": "test@example.com",
-            "id_access_token": tok,
-        }
-        request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii")
-        channel = self.make_request(
-            b"POST", request_url, request_data, access_token=tok
-        )
-        self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result)
diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py
index 8bbd109092..d9a210b616 100644
--- a/tests/rest/client/test_keys.py
+++ b/tests/rest/client/test_keys.py
@@ -315,9 +315,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): "master_key": master_key2, }, ) - self.assertEqual( - channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body - ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) # Pretend that MAS did UIA and allowed us to replace the master key. channel = self.make_request( @@ -349,9 +347,7 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): "master_key": master_key3, }, ) - self.assertEqual( - channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body - ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) # Pretend that MAS did UIA and allowed us to replace the master key. channel = self.make_request( @@ -376,6 +372,4 @@ class SigningKeyUploadServletTestCase(unittest.HomeserverTestCase): "master_key": master_key3, }, ) - self.assertEqual( - channel.code, HTTPStatus.NOT_IMPLEMENTED, channel.json_body - ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.json_body) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
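The assertion changes above capture a behaviour change rather than a new feature: with auth delegated to an external server (MSC3861/MAS), replacing an existing cross-signing master key without a prior grant is now rejected with 401 Unauthorized instead of 501 Not Implemented. A sketch of what a client observes, with placeholder URL, token, and key body:

```python
# Sketch only: attempt to replace the cross-signing master key.
import requests

BASE = "https://example.org"
AUTH = {"Authorization": "Bearer <access_token>"}

def try_replace_master_key(master_key: dict) -> int:
    r = requests.post(
        f"{BASE}/_matrix/client/v3/keys/device_signing/upload",
        headers=AUTH,
        json={"master_key": master_key},
    )
    # 200: accepted. 401: an existing key may only be replaced after the
    # delegated auth server approves the replacement out of band.
    return r.status_code
```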
index 2b1e44381b..24e2288ee3 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -27,6 +27,7 @@ from typing import ( Collection, Dict, List, + Literal, Optional, Tuple, Union, @@ -35,7 +36,6 @@ from unittest.mock import Mock from urllib.parse import urlencode import pymacaroons -from typing_extensions import Literal from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource @@ -43,6 +43,7 @@ from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.constants import ApprovalNoticeMedium, LoginType from synapse.api.errors import Codes +from synapse.api.urls import LoginSSORedirectURIBuilder from synapse.appservice import ApplicationService from synapse.http.client import RawHeaders from synapse.module_api import ModuleApi @@ -55,7 +56,6 @@ from synapse.util import Clock from tests import unittest from tests.handlers.test_oidc import HAS_OIDC -from tests.handlers.test_saml import has_saml2 from tests.rest.client.utils import TEST_OIDC_CONFIG from tests.server import FakeChannel from tests.test_utils.html_parsers import TestHtmlParser @@ -69,6 +69,10 @@ try: except ImportError: HAS_JWT = False +import logging + +logger = logging.getLogger(__name__) + # synapse server name: used to populate public_baseurl in some tests SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse" @@ -77,22 +81,7 @@ SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse" # FakeChannel.isSecure() returns False, so synapse will see the requested uri as # http://..., so using http in the public_baseurl stops Synapse trying to redirect to # https://.... -BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,) - -# CAS server used in some tests -CAS_SERVER = "https://fake.test" - -# just enough to tell pysaml2 where to redirect to -SAML_SERVER = "https://test.saml.server/idp/sso" -TEST_SAML_METADATA = """ -<md:EntityDescriptor xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"> - <md:IDPSSODescriptor protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol"> - <md:SingleSignOnService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="%(SAML_SERVER)s"/> - </md:IDPSSODescriptor> -</md:EntityDescriptor> -""" % { - "SAML_SERVER": SAML_SERVER, -} +PUBLIC_BASEURL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,) LOGIN_URL = b"/_matrix/client/r0/login" TEST_URL = b"/_matrix/client/r0/account/whoami" @@ -109,6 +98,23 @@ ADDITIONAL_LOGIN_FLOWS = [ ] +def get_relative_uri_from_absolute_uri(absolute_uri: str) -> str: + """ + Peels off the path and query string from an absolute URI. Useful when interacting + with `make_request(...)` util function which expects a relative path instead of a + full URI. + """ + parsed_uri = urllib.parse.urlparse(absolute_uri) + # Sanity check that we're working with an absolute URI + assert parsed_uri.scheme == "http" or parsed_uri.scheme == "https" + + relative_uri = parsed_uri.path + if parsed_uri.query: + relative_uri += "?" 
+ parsed_uri.query + + return relative_uri + + class TestSpamChecker: def __init__(self, config: None, api: ModuleApi): api.register_spam_checker_callbacks( @@ -172,7 +178,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver() self.hs.config.registration.enable_registration = True - self.hs.config.registration.registrations_require_3pid = [] self.hs.config.registration.auto_join_rooms = [] self.hs.config.captcha.enable_registration_captcha = False @@ -603,7 +608,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): ) -@skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC") +@skip_unless(HAS_OIDC, "Requires OIDC") class MultiSSOTestCase(unittest.HomeserverTestCase): """Tests for homeservers with multiple SSO providers enabled""" @@ -614,21 +619,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def default_config(self) -> Dict[str, Any]: config = super().default_config() - config["public_baseurl"] = BASE_URL - - config["cas_config"] = { - "enabled": True, - "server_url": CAS_SERVER, - "service_url": "https://matrix.goodserver.com:8448", - } - - config["saml2_config"] = { - "sp_config": { - "metadata": {"inline": [TEST_SAML_METADATA]}, - # use the XMLSecurity backend to avoid relying on xmlsec1 - "crypto_backend": "XMLSecurity", - }, - } + config["public_baseurl"] = PUBLIC_BASEURL # default OIDC provider config["oidc_config"] = TEST_OIDC_CONFIG @@ -653,6 +644,9 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): ] return config + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config) + def create_resource_dict(self) -> Dict[str, Resource]: d = super().create_resource_dict() d.update(build_synapse_client_resource_tree(self.hs)) @@ -664,7 +658,6 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, channel.result) expected_flow_types = [ - "m.login.cas", "m.login.sso", "m.login.token", "m.login.password", @@ -678,8 +671,6 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): self.assertCountEqual( flows["m.login.sso"]["identity_providers"], [ - {"id": "cas", "name": "CAS"}, - {"id": "saml", "name": "SAML"}, {"id": "oidc-idp1", "name": "IDP1"}, {"id": "oidc", "name": "OIDC"}, ], @@ -713,56 +704,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): self.assertEqual(params["redirectUrl"], [TEST_CLIENT_REDIRECT_URL]) returned_idps.append(params["idp"][0]) - self.assertCountEqual(returned_idps, ["cas", "oidc", "oidc-idp1", "saml"]) - - def test_multi_sso_redirect_to_cas(self) -> None: - """If CAS is chosen, should redirect to the CAS server""" - - channel = self.make_request( - "GET", - "/_synapse/client/pick_idp?redirectUrl=" - + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) - + "&idp=cas", - shorthand=False, - ) - self.assertEqual(channel.code, 302, channel.result) - location_headers = channel.headers.getRawHeaders("Location") - assert location_headers - cas_uri = location_headers[0] - cas_uri_path, cas_uri_query = cas_uri.split("?", 1) - - # it should redirect us to the login page of the cas server - self.assertEqual(cas_uri_path, CAS_SERVER + "/login") - - # check that the redirectUrl is correctly encoded in the service param - ie, the - # place that CAS will redirect to - cas_uri_params = urllib.parse.parse_qs(cas_uri_query) - service_uri = cas_uri_params["service"][0] - _, 
service_uri_query = service_uri.split("?", 1) - service_uri_params = urllib.parse.parse_qs(service_uri_query) - self.assertEqual(service_uri_params["redirectUrl"][0], TEST_CLIENT_REDIRECT_URL) - - def test_multi_sso_redirect_to_saml(self) -> None: - """If SAML is chosen, should redirect to the SAML server""" - channel = self.make_request( - "GET", - "/_synapse/client/pick_idp?redirectUrl=" - + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) - + "&idp=saml", - ) - self.assertEqual(channel.code, 302, channel.result) - location_headers = channel.headers.getRawHeaders("Location") - assert location_headers - saml_uri = location_headers[0] - saml_uri_path, saml_uri_query = saml_uri.split("?", 1) - - # it should redirect us to the login page of the SAML server - self.assertEqual(saml_uri_path, SAML_SERVER) - - # the RelayState is used to carry the client redirect url - saml_uri_params = urllib.parse.parse_qs(saml_uri_query) - relay_state_param = saml_uri_params["RelayState"][0] - self.assertEqual(relay_state_param, TEST_CLIENT_REDIRECT_URL) + self.assertCountEqual(returned_idps, ["oidc", "oidc-idp1"]) def test_login_via_oidc(self) -> None: """If OIDC is chosen, should redirect to the OIDC auth endpoint""" @@ -773,13 +715,38 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): # pick the default OIDC provider channel = self.make_request( "GET", - "/_synapse/client/pick_idp?redirectUrl=" - + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) - + "&idp=oidc", + f"/_synapse/client/pick_idp?redirectUrl={urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)}&idp=oidc", ) self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers + sso_login_redirect_uri = location_headers[0] + + # it should redirect us to the standard login SSO redirect flow + self.assertEqual( + sso_login_redirect_uri, + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="oidc", client_redirect_url=TEST_CLIENT_REDIRECT_URL + ), + ) + + with fake_oidc_server.patch_homeserver(hs=self.hs): + # follow the redirect + channel = self.make_request( + "GET", + # We have to make this relative to be compatible with `make_request(...)` + get_relative_uri_from_absolute_uri(sso_login_redirect_uri), + # We have to set the Host header to match the `public_baseurl` to avoid + # the extra redirect in the `SsoRedirectServlet` in order for the + # cookies to be visible. 
+ custom_headers=[ + ("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME), + ], + ) + + self.assertEqual(channel.code, 302, channel.result) + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers oidc_uri = location_headers[0] oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1) @@ -838,12 +805,38 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): self.assertEqual(chan.json_body["user_id"], "@user1:test") def test_multi_sso_redirect_to_unknown(self) -> None: - """An unknown IdP should cause a 400""" + """An unknown IdP should cause a 404""" channel = self.make_request( "GET", "/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz", ) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, 302, channel.result) + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + sso_login_redirect_uri = location_headers[0] + + # it should redirect us to the standard login SSO redirect flow + self.assertEqual( + sso_login_redirect_uri, + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="xyz", client_redirect_url="http://x" + ), + ) + + # follow the redirect + channel = self.make_request( + "GET", + # We have to make this relative to be compatible with `make_request(...)` + get_relative_uri_from_absolute_uri(sso_login_redirect_uri), + # We have to set the Host header to match the `public_baseurl` to avoid + # the extra redirect in the `SsoRedirectServlet` in order for the + # cookies to be visible. + custom_headers=[ + ("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME), + ], + ) + + self.assertEqual(channel.code, 404, channel.result) def test_client_idp_redirect_to_unknown(self) -> None: """If the client tries to pick an unknown IdP, return a 404""" @@ -891,162 +884,12 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): raise ValueError("No %s caveat in macaroon" % (key,)) -class CASTestCase(unittest.HomeserverTestCase): - servlets = [ - login.register_servlets, - ] - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.base_url = "https://matrix.goodserver.com/" - self.redirect_path = "_synapse/client/login/sso/redirect/confirm" - - config = self.default_config() - config["public_baseurl"] = ( - config.get("public_baseurl") or "https://matrix.goodserver.com:8448" - ) - config["cas_config"] = { - "enabled": True, - "server_url": CAS_SERVER, - } - - cas_user_id = "username" - self.user_id = "@%s:test" % cas_user_id - - async def get_raw(uri: str, args: Any) -> bytes: - """Return an example response payload from a call to the `/proxyValidate` - endpoint of a CAS server, copied from - https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20 - - This needs to be returned by an async function (as opposed to set as the - mock's return value) because the corresponding Synapse code awaits on it. 
- """ - return ( - """ - <cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'> - <cas:authenticationSuccess> - <cas:user>%s</cas:user> - <cas:proxyGrantingTicket>PGTIOU-84678-8a9d...</cas:proxyGrantingTicket> - <cas:proxies> - <cas:proxy>https://proxy2/pgtUrl</cas:proxy> - <cas:proxy>https://proxy1/pgtUrl</cas:proxy> - </cas:proxies> - </cas:authenticationSuccess> - </cas:serviceResponse> - """ - % cas_user_id - ).encode("utf-8") - - mocked_http_client = Mock(spec=["get_raw"]) - mocked_http_client.get_raw.side_effect = get_raw - - self.hs = self.setup_test_homeserver( - config=config, - proxied_http_client=mocked_http_client, - ) - - return self.hs - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.deactivate_account_handler = hs.get_deactivate_account_handler() - - def test_cas_redirect_confirm(self) -> None: - """Tests that the SSO login flow serves a confirmation page before redirecting a - user to the redirect URL. - """ - base_url = "/_matrix/client/r0/login/cas/ticket?redirectUrl" - redirect_url = "https://dodgy-site.com/" - - url_parts = list(urllib.parse.urlparse(base_url)) - query = dict(urllib.parse.parse_qsl(url_parts[4])) - query.update({"redirectUrl": redirect_url}) - query.update({"ticket": "ticket"}) - url_parts[4] = urllib.parse.urlencode(query) - cas_ticket_url = urllib.parse.urlunparse(url_parts) - - # Get Synapse to call the fake CAS and serve the template. - channel = self.make_request("GET", cas_ticket_url) - - # Test that the response is HTML. - self.assertEqual(channel.code, 200, channel.result) - content_type_header_value = "" - for header in channel.headers.getRawHeaders("Content-Type", []): - content_type_header_value = header - - self.assertTrue(content_type_header_value.startswith("text/html")) - - # Test that the body isn't empty. - self.assertTrue(len(channel.result["body"]) > 0) - - # And that it contains our redirect link - self.assertIn(redirect_url, channel.result["body"].decode("UTF-8")) - - @override_config( - { - "sso": { - "client_whitelist": [ - "https://legit-site.com/", - "https://other-site.com/", - ] - } - } - ) - def test_cas_redirect_whitelisted(self) -> None: - """Tests that the SSO login flow serves a redirect to a whitelisted url""" - self._test_redirect("https://legit-site.com/") - - @override_config({"public_baseurl": "https://example.com"}) - def test_cas_redirect_login_fallback(self) -> None: - self._test_redirect("https://example.com/_matrix/static/client/login") - - def _test_redirect(self, redirect_url: str) -> None: - """Tests that the SSO login flow serves a redirect for the given redirect URL.""" - cas_ticket_url = ( - "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" - % (urllib.parse.quote(redirect_url)) - ) - - # Get Synapse to call the fake CAS and serve the template. - channel = self.make_request("GET", cas_ticket_url) - - self.assertEqual(channel.code, 302) - location_headers = channel.headers.getRawHeaders("Location") - assert location_headers - self.assertEqual(location_headers[0][: len(redirect_url)], redirect_url) - - @override_config({"sso": {"client_whitelist": ["https://legit-site.com/"]}}) - def test_deactivated_user(self) -> None: - """Logging in as a deactivated account should error.""" - redirect_url = "https://legit-site.com/" - - # First login (to create the user). - self._test_redirect(redirect_url) - - # Deactivate the account. 
- self.get_success( - self.deactivate_account_handler.deactivate_account( - self.user_id, False, create_requester(self.user_id) - ) - ) - - # Request the CAS ticket. - cas_ticket_url = ( - "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" - % (urllib.parse.quote(redirect_url)) - ) - - # Get Synapse to call the fake CAS and serve the template. - channel = self.make_request("GET", cas_ticket_url) - - # Because the user is deactivated they are served an error template. - self.assertEqual(channel.code, 403) - self.assertIn(b"SSO account deactivated", channel.result["body"]) - - @skip_unless(HAS_JWT, "requires authlib") class JWTTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, + profile.register_servlets, ] jwt_secret = "secret" @@ -1133,18 +976,18 @@ class JWTTestCase(unittest.HomeserverTestCase): channel = self.jwt_login({"sub": "kermit", "iss": "invalid"}) self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - self.assertEqual( + self.assertRegex( channel.json_body["error"], - 'JWT validation failed: invalid_claim: Invalid claim "iss"', + r"^JWT validation failed: invalid_claim: Invalid claim [\"']iss[\"']$", ) # Not providing an issuer. channel = self.jwt_login({"sub": "kermit"}) self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - self.assertEqual( + self.assertRegex( channel.json_body["error"], - 'JWT validation failed: missing_claim: Missing "iss" claim', + r"^JWT validation failed: missing_claim: Missing [\"']iss[\"'] claim$", ) def test_login_iss_no_config(self) -> None: @@ -1165,18 +1008,18 @@ class JWTTestCase(unittest.HomeserverTestCase): channel = self.jwt_login({"sub": "kermit", "aud": "invalid"}) self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - self.assertEqual( + self.assertRegex( channel.json_body["error"], - 'JWT validation failed: invalid_claim: Invalid claim "aud"', + r"^JWT validation failed: invalid_claim: Invalid claim [\"']aud[\"']$", ) # Not providing an audience. 
channel = self.jwt_login({"sub": "kermit"}) self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - self.assertEqual( + self.assertRegex( channel.json_body["error"], - 'JWT validation failed: missing_claim: Missing "aud" claim', + r"^JWT validation failed: missing_claim: Missing [\"']aud[\"'] claim$", ) def test_login_aud_no_config(self) -> None: @@ -1184,9 +1027,9 @@ class JWTTestCase(unittest.HomeserverTestCase): channel = self.jwt_login({"sub": "kermit", "aud": "invalid"}) self.assertEqual(channel.code, 403, msg=channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - self.assertEqual( + self.assertRegex( channel.json_body["error"], - 'JWT validation failed: invalid_claim: Invalid claim "aud"', + r"^JWT validation failed: invalid_claim: Invalid claim [\"']aud[\"']$", ) def test_login_default_sub(self) -> None: @@ -1202,6 +1045,30 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") + @override_config( + {"jwt_config": {**base_config, "display_name_claim": "display_name"}} + ) + def test_login_custom_display_name(self) -> None: + """Test setting a custom display name.""" + localpart = "pinkie" + user_id = f"@{localpart}:test" + display_name = "Pinkie Pie" + + # Perform the login, specifying a custom display name. + channel = self.jwt_login({"sub": localpart, "display_name": display_name}) + self.assertEqual(channel.code, 200, msg=channel.result) + self.assertEqual(channel.json_body["user_id"], user_id) + + # Fetch the user's display name and check that it was set correctly. + access_token = channel.json_body["access_token"] + channel = self.make_request( + "GET", + f"/_matrix/client/v3/profile/{user_id}/displayname", + access_token=access_token, + ) + self.assertEqual(channel.code, 200, msg=channel.result) + self.assertEqual(channel.json_body["displayname"], display_name) + def test_login_no_token(self) -> None: params = {"type": "org.matrix.login.jwt"} channel = self.make_request(b"POST", LOGIN_URL, params) @@ -1448,7 +1315,7 @@ class UsernamePickerTestCase(HomeserverTestCase): def default_config(self) -> Dict[str, Any]: config = super().default_config() - config["public_baseurl"] = BASE_URL + config["public_baseurl"] = PUBLIC_BASEURL config["oidc_config"] = {} config["oidc_config"].update(TEST_OIDC_CONFIG) @@ -1474,7 +1341,6 @@ class UsernamePickerTestCase(HomeserverTestCase): self, fake_oidc_server: FakeOidcServer, displayname: str, - email: str, picture: str, ) -> Tuple[str, str]: # do the start of the login flow @@ -1483,8 +1349,7 @@ class UsernamePickerTestCase(HomeserverTestCase): { "sub": "tester", "displayname": displayname, - "picture": picture, - "email": email, + "picture": picture }, TEST_CLIENT_REDIRECT_URL, ) @@ -1513,7 +1378,6 @@ class UsernamePickerTestCase(HomeserverTestCase): session = username_mapping_sessions[session_id] self.assertEqual(session.remote_user_id, "tester") self.assertEqual(session.display_name, displayname) - self.assertEqual(session.emails, [email]) self.assertEqual(session.avatar_url, picture) self.assertEqual(session.client_redirect_url, TEST_CLIENT_REDIRECT_URL) @@ -1530,11 +1394,10 @@ class UsernamePickerTestCase(HomeserverTestCase): mxid = "@bobby:test" displayname = "Jonny" - email = "bobby@test.com" picture = "mxc://test/avatar_url" picker_url, session_id = self.proceed_to_username_picker_page( - fake_oidc_server, displayname, email, picture + 
fake_oidc_server, displayname, picture ) # Now, submit a username to the username picker, which should serve a redirect @@ -1544,8 +1407,7 @@ class UsernamePickerTestCase(HomeserverTestCase): { b"username": b"bobby", b"use_display_name": b"true", - b"use_avatar": b"true", - b"use_email": email, + b"use_avatar": b"true" } ).encode("utf8") chan = self.make_request( @@ -1606,12 +1468,6 @@ class UsernamePickerTestCase(HomeserverTestCase): self.assertIn("mxc://test", channel.json_body["avatar_url"]) self.assertEqual(displayname, channel.json_body["displayname"]) - # ensure the email from the OIDC response has been configured for the user. - channel = self.make_request( - "GET", "/account/3pid", access_token=chan.json_body["access_token"] - ) - self.assertEqual(channel.code, 200, channel.result) - self.assertEqual(email, channel.json_body["threepids"][0]["address"]) def test_username_picker_dont_use_displayname_avatar_or_email(self) -> None: """Test the happy path of a username picker flow without using displayname, avatar or email.""" @@ -1620,12 +1476,11 @@ class UsernamePickerTestCase(HomeserverTestCase): mxid = "@bobby:test" displayname = "Jonny" - email = "bobby@test.com" picture = "mxc://test/avatar_url" username = "bobby" picker_url, session_id = self.proceed_to_username_picker_page( - fake_oidc_server, displayname, email, picture + fake_oidc_server, displayname, picture ) # Now, submit a username to the username picker, which should serve a redirect @@ -1696,13 +1551,6 @@ class UsernamePickerTestCase(HomeserverTestCase): self.assertNotIn("avatar_url", channel.json_body) self.assertEqual(username, channel.json_body["displayname"]) - # ensure the email from the OIDC response has not been configured for the user. - channel = self.make_request( - "GET", "/account/3pid", access_token=chan.json_body["access_token"] - ) - self.assertEqual(channel.code, 200, channel.result) - self.assertListEqual([], channel.json_body["threepids"]) - async def mock_get_file( url: str, diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py
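Among the login changes above, `test_login_custom_display_name` covers the new `jwt_config.display_name_claim` option, which copies a claim from the login token into the new user's profile. A sketch of minting a compatible token with authlib (the library behind Synapse's JWT support); the shared secret and claim values are placeholders:

```python
# Sketch: an HS256 JWT whose "display_name" claim becomes the profile
# displayname when jwt_config.display_name_claim = "display_name".
from authlib.jose import jwt

header = {"alg": "HS256"}
payload = {"sub": "pinkie", "display_name": "Pinkie Pie"}
token = jwt.encode(header, payload, "secret").decode("ascii")

# The client then logs in with:
login_body = {"type": "org.matrix.login.jwt", "token": token}
```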
index fbacf9d869..99a0fd4fcd 100644
--- a/tests/rest/client/test_login_token_request.py
+++ b/tests/rest/client/test_login_token_request.py
@@ -43,7 +43,6 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.hs = self.setup_test_homeserver()
         self.hs.config.registration.enable_registration = True
-        self.hs.config.registration.registrations_require_3pid = []
         self.hs.config.registration.auto_join_rooms = []
         self.hs.config.captcha.enable_registration_captcha = False
 
diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py
index 30b6d31d0a..6ee761e44b 100644
--- a/tests/rest/client/test_media.py
+++ b/tests/rest/client/test_media.py
@@ -24,14 +24,13 @@ import json import os import re import shutil -from typing import Any, BinaryIO, Dict, List, Optional, Sequence, Tuple, Type +from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Sequence, Tuple, Type from unittest.mock import MagicMock, Mock, patch from urllib import parse from urllib.parse import quote, urlencode from parameterized import parameterized, parameterized_class from PIL import Image as Image -from typing_extensions import ClassVar from twisted.internet import defer from twisted.internet._resolver import HostResolution @@ -66,6 +65,7 @@ from tests.media.test_media_storage import ( SVG, TestImage, empty_file, + small_cmyk_jpeg, small_lossless_webp, small_png, small_png_with_transparency, @@ -137,6 +137,7 @@ class MediaDomainBlockingTests(unittest.HomeserverTestCase): time_now_ms=clock.time_msec(), upload_name="test.png", filesystem_id=file_id, + sha256=file_id, ) ) self.register_user("user", "password") @@ -1005,7 +1006,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): data = base64.b64encode(SMALL_PNG) end_content = ( - b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>" + b'<html><head><img src="data:image/png;base64,%s" /></head></html>' ) % (data,) channel = self.make_request( @@ -1617,6 +1618,63 @@ class MediaConfigTest(unittest.HomeserverTestCase): ) +class MediaConfigModuleCallbackTestCase(unittest.HomeserverTestCase): + servlets = [ + media.register_servlets, + admin.register_servlets, + login.register_servlets, + ] + + def make_homeserver( + self, reactor: ThreadedMemoryReactorClock, clock: Clock + ) -> HomeServer: + config = self.default_config() + + self.storage_path = self.mktemp() + self.media_store_path = self.mktemp() + os.mkdir(self.storage_path) + os.mkdir(self.media_store_path) + config["media_store_path"] = self.media_store_path + + provider_config = { + "module": "synapse.media.storage_provider.FileStorageProviderBackend", + "store_local": True, + "store_synchronous": False, + "store_remote": True, + "config": {"directory": self.storage_path}, + } + + config["media_storage_providers"] = [provider_config] + + return self.setup_test_homeserver(config=config) + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user = self.register_user("user", "password") + self.tok = self.login("user", "password") + + hs.get_module_api().register_media_repository_callbacks( + get_media_config_for_user=self.get_media_config_for_user, + ) + + async def get_media_config_for_user( + self, + user_id: str, + ) -> Optional[JsonDict]: + # We echo back the user_id and set a custom upload size. 
+ return {"m.upload.size": 1024, "user_id": user_id} + + def test_media_config(self) -> None: + channel = self.make_request( + "GET", + "/_matrix/client/v1/media/config", + shorthand=False, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["m.upload.size"], 1024) + self.assertEqual(channel.json_body["user_id"], self.user) + + class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): servlets = [ media.register_servlets, @@ -1916,6 +1974,7 @@ class RemoteDownloadLimiterTestCase(unittest.HomeserverTestCase): test_images = [ small_png, small_png_with_transparency, + small_cmyk_jpeg, small_lossless_webp, empty_file, SVG, @@ -1957,7 +2016,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): """A mock for MatrixFederationHttpClient.federation_get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]] + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]], bytes]], ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]: data, response = r output_stream.write(data) @@ -1991,7 +2050,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): """A mock for MatrixFederationHttpClient.get_file.""" def write_to( - r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]] + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]], ) -> Tuple[int, Dict[bytes, List[bytes]]]: data, response = r output_stream.write(data) @@ -2400,7 +2459,7 @@ class DownloadAndThumbnailTestCase(unittest.HomeserverTestCase): if expected_body is not None: self.assertEqual( - channel.result["body"], expected_body, channel.result["body"] + channel.result["body"], expected_body, channel.result["body"].hex() ) else: # ensure that the result is at least some valid image @@ -2592,6 +2651,7 @@ class AuthenticatedMediaTestCase(unittest.HomeserverTestCase): time_now_ms=self.clock.time_msec(), upload_name="remote_test.png", filesystem_id=file_id, + sha256=file_id, ) ) @@ -2675,3 +2735,114 @@ class AuthenticatedMediaTestCase(unittest.HomeserverTestCase): access_token=self.tok, ) self.assertEqual(channel10.code, 200) + + def test_authenticated_media_etag(self) -> None: + """Test that ETag works correctly with authenticated media over client + APIs""" + + # upload some local media with authentication on + channel = self.make_request( + "POST", + "_matrix/media/v3/upload?filename=test_png_upload", + SMALL_PNG, + self.tok, + shorthand=False, + content_type=b"image/png", + custom_headers=[("Content-Length", str(67))], + ) + self.assertEqual(channel.code, 200) + res = channel.json_body.get("content_uri") + assert res is not None + uri = res.split("mxc://")[1] + + # Check standard media endpoint + self._check_caching(f"/download/{uri}") + + # check thumbnails as well + params = "?width=32&height=32&method=crop" + self._check_caching(f"/thumbnail/{uri}{params}") + + # Inject a piece of remote media. 
+ file_id = "abcdefg12345" + file_info = FileInfo(server_name="lonelyIsland", file_id=file_id) + + media_storage = self.hs.get_media_repository().media_storage + + ctx = media_storage.store_into_file(file_info) + (f, fname) = self.get_success(ctx.__aenter__()) + f.write(SMALL_PNG) + self.get_success(ctx.__aexit__(None, None, None)) + + # we write the authenticated status when storing media, so this should pick up + # config and authenticate the media + self.get_success( + self.store.store_cached_remote_media( + origin="lonelyIsland", + media_id="52", + media_type="image/png", + media_length=1, + time_now_ms=self.clock.time_msec(), + upload_name="remote_test.png", + filesystem_id=file_id, + sha256=file_id, + ) + ) + + # ensure we have thumbnails for the non-dynamic code path + if self.extra_config == {"dynamic_thumbnails": False}: + self.get_success( + self.repo._generate_thumbnails( + "lonelyIsland", "52", file_id, "image/png" + ) + ) + + self._check_caching("/download/lonelyIsland/52") + + params = "?width=32&height=32&method=crop" + self._check_caching(f"/thumbnail/lonelyIsland/52{params}") + + def _check_caching(self, path: str) -> None: + """ + Checks that: + 1. fetching the path returns an ETag header + 2. refetching with the ETag returns a 304 without a body + 3. refetching with the ETag but through unauthenticated endpoint + returns 404 + """ + + # Request media over authenticated endpoint, should be found + channel1 = self.make_request( + "GET", + f"/_matrix/client/v1/media{path}", + access_token=self.tok, + shorthand=False, + ) + self.assertEqual(channel1.code, 200) + + # Should have a single ETag field + etags = channel1.headers.getRawHeaders("ETag") + self.assertIsNotNone(etags) + assert etags is not None # For mypy + self.assertEqual(len(etags), 1) + etag = etags[0] + + # Refetching with the etag should result in 304 and empty body. + channel2 = self.make_request( + "GET", + f"/_matrix/client/v1/media{path}", + access_token=self.tok, + shorthand=False, + custom_headers=[("If-None-Match", etag)], + ) + self.assertEqual(channel2.code, 304) + self.assertEqual(channel2.is_finished(), True) + self.assertNotIn("body", channel2.result) + + # Refetching with the etag but no access token should result in 404. + channel3 = self.make_request( + "GET", + f"/_matrix/media/r0{path}", + shorthand=False, + custom_headers=[("If-None-Match", etag)], + ) + self.assertEqual(channel3.code, 404) diff --git a/tests/rest/client/test_models.py b/tests/rest/client/test_models.py deleted file mode 100644
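`MediaConfigModuleCallbackTestCase` above registers a `get_media_config_for_user` media-repository callback, letting a module rewrite the `/_matrix/client/v1/media/config` response per user. A sketch of a standalone module using the same hook (the callback name comes from the diff; the module wiring follows the usual Synapse module conventions, and the fallback-on-None behaviour is an assumption):

```python
# Sketch of a Synapse module using the callback exercised above.
from typing import Optional

from synapse.module_api import ModuleApi
from synapse.types import JsonDict


class PerUserMediaConfig:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_media_repository_callbacks(
            get_media_config_for_user=self.get_media_config_for_user,
        )

    async def get_media_config_for_user(self, user_id: str) -> Optional[JsonDict]:
        # Returning a dict replaces the media config served to this user;
        # returning None is assumed to fall back to the server-wide config.
        if user_id.startswith("@trusted-"):
            return {"m.upload.size": 100 * 1024 * 1024}
        return None
```

The ETag tests above likewise fix the conditional-request contract for authenticated media: a fetch returns an `ETag`, re-fetching with `If-None-Match` yields an empty 304, and presenting the same ETag to the unauthenticated `r0` endpoint yields a 404.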
index f8a56c80ca..0000000000
--- a/tests/rest/client/test_models.py
+++ /dev/null
@@ -1,89 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2022 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# -import unittest as stdlib_unittest -from typing import TYPE_CHECKING - -from typing_extensions import Literal - -from synapse._pydantic_compat import HAS_PYDANTIC_V2 -from synapse.types.rest.client import EmailRequestTokenBody - -if TYPE_CHECKING or HAS_PYDANTIC_V2: - from pydantic.v1 import BaseModel, ValidationError -else: - from pydantic import BaseModel, ValidationError - - -class ThreepidMediumEnumTestCase(stdlib_unittest.TestCase): - class Model(BaseModel): - medium: Literal["email", "msisdn"] - - def test_accepts_valid_medium_string(self) -> None: - """Sanity check that Pydantic behaves sensibly with an enum-of-str - - This is arguably more of a test of a class that inherits from str and Enum - simultaneously. - """ - model = self.Model.parse_obj({"medium": "email"}) - self.assertEqual(model.medium, "email") - - def test_rejects_invalid_medium_value(self) -> None: - with self.assertRaises(ValidationError): - self.Model.parse_obj({"medium": "interpretive_dance"}) - - def test_rejects_invalid_medium_type(self) -> None: - with self.assertRaises(ValidationError): - self.Model.parse_obj({"medium": 123}) - - -class EmailRequestTokenBodyTestCase(stdlib_unittest.TestCase): - base_request = { - "client_secret": "hunter2", - "email": "alice@wonderland.com", - "send_attempt": 1, - } - - def test_token_required_if_id_server_provided(self) -> None: - with self.assertRaises(ValidationError): - EmailRequestTokenBody.parse_obj( - { - **self.base_request, - "id_server": "identity.wonderland.com", - } - ) - with self.assertRaises(ValidationError): - EmailRequestTokenBody.parse_obj( - { - **self.base_request, - "id_server": "identity.wonderland.com", - "id_access_token": None, - } - ) - - def test_token_typechecked_when_id_server_provided(self) -> None: - with self.assertRaises(ValidationError): - EmailRequestTokenBody.parse_obj( - { - **self.base_request, - "id_server": "identity.wonderland.com", - "id_access_token": 1337, - } - ) diff --git a/tests/rest/client/test_owned_state.py b/tests/rest/client/test_owned_state.py new file mode 100644
index 0000000000..5fb5767676
--- /dev/null
+++ b/tests/rest/client/test_owned_state.py
@@ -0,0 +1,308 @@ +from http import HTTPStatus + +from parameterized import parameterized_class + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import Codes +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.rest import admin +from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + +_STATE_EVENT_TEST_TYPE = "com.example.test" + +# To stress-test parsing, include separator & sigil characters +_STATE_KEY_SUFFIX = "_state_key_suffix:!@#$123" + + +class OwnedStateBase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.creator_user_id = self.register_user("creator", "pass") + self.creator_access_token = self.login("creator", "pass") + self.user1_user_id = self.register_user("user1", "pass") + self.user1_access_token = self.login("user1", "pass") + + self.room_id = self.helper.create_room_as( + self.creator_user_id, + tok=self.creator_access_token, + is_public=True, + extra_content={ + "power_level_content_override": { + "events": { + _STATE_EVENT_TEST_TYPE: 0, + }, + }, + }, + ) + + self.helper.join( + room=self.room_id, user=self.user1_user_id, tok=self.user1_access_token + ) + + +class WithoutOwnedStateTestCase(OwnedStateBase): + def default_config(self) -> JsonDict: + config = super().default_config() + config["default_room_version"] = RoomVersions.V10.identifier + return config + + def test_user_can_set_state_with_own_userid_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}", + tok=self.user1_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_cannot_set_state_with_own_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.creator_user_id}{_STATE_KEY_SUFFIX}", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_other_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_other_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_nonmember_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@notinroom:hs2", + tok=self.creator_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_cannot_set_state_with_malformed_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@oops", + tok=self.creator_access_token, + 
expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + +@parameterized_class( + ("room_version",), + [(i,) for i, v in KNOWN_ROOM_VERSIONS.items() if v.msc3757_enabled], +) +class MSC3757OwnedStateTestCase(OwnedStateBase): + room_version: str + + def default_config(self) -> JsonDict: + config = super().default_config() + config["default_room_version"] = self.room_version + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + + self.user2_user_id = self.register_user("user2", "pass") + self.user2_access_token = self.login("user2", "pass") + + self.helper.join( + room=self.room_id, user=self.user2_user_id, tok=self.user2_access_token + ) + + def test_user_can_set_state_with_own_suffixed_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.user1_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_can_set_state_with_other_userid_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}", + tok=self.creator_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_can_set_state_with_other_suffixed_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.creator_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_user_cannot_set_state_with_other_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user2_user_id}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_user_cannot_set_state_with_other_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user2_user_id}{_STATE_KEY_SUFFIX}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_user_cannot_set_state_with_unseparated_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.user1_user_id}{_STATE_KEY_SUFFIX[1:]}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_user_cannot_set_state_with_misplaced_userid_in_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + # Still put @ at start of state key, because without it, there is no write protection at all + state_key=f"@prefix_{self.user1_user_id}{_STATE_KEY_SUFFIX}", + tok=self.user1_access_token, + expect_code=HTTPStatus.FORBIDDEN, + ) + + self.assertEqual( + body["errcode"], + Codes.FORBIDDEN, + body, + ) + + def test_room_creator_can_set_state_with_nonmember_userid_key(self) -> None: + self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@notinroom:hs2", + tok=self.creator_access_token, + expect_code=HTTPStatus.OK, + ) + + def test_room_creator_cannot_set_state_with_malformed_userid_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key="@oops", + 
tok=self.creator_access_token, + expect_code=HTTPStatus.BAD_REQUEST, + ) + + self.assertEqual( + body["errcode"], + Codes.BAD_JSON, + body, + ) + + def test_room_creator_cannot_set_state_with_improperly_suffixed_key(self) -> None: + body = self.helper.send_state( + self.room_id, + _STATE_EVENT_TEST_TYPE, + {}, + state_key=f"{self.creator_user_id}@{_STATE_KEY_SUFFIX[1:]}", + tok=self.creator_access_token, + expect_code=HTTPStatus.BAD_REQUEST, + ) + + self.assertEqual( + body["errcode"], + Codes.BAD_JSON, + body, + ) diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py
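`MSC3757OwnedStateTestCase` above pins down who may write "owned" state in msc3757-enabled room versions. The rule, in simplified form (an illustration only, not Synapse's implementation, which additionally validates the MXID and compares sender and owner power levels):

```python
# Simplified illustration of the MSC3757 ownership rule probed above: a
# state key is owned by a user when it is exactly their MXID, or their
# MXID followed by an underscore-separated suffix.
def owns_state_key(user_id: str, state_key: str) -> bool:
    return state_key == user_id or state_key.startswith(user_id + "_")


assert owns_state_key("@user1:test", "@user1:test")
assert owns_state_key("@user1:test", "@user1:test_state_key_suffix:!@#$123")
assert not owns_state_key("@user1:test", "@user2:test_state_key_suffix:!@#$123")
assert not owns_state_key("@user1:test", "@user1:teststate_key_suffix:!@#$123")
```

Senders with a higher power level than the key's owner (here, the room creator) may still overwrite such events, and state keys that begin with `@` but do not parse as a user ID are rejected with `M_BAD_JSON`.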
index 5ced8319e1..6b9c70974a 100644
--- a/tests/rest/client/test_presence.py
+++ b/tests/rest/client/test_presence.py
@@ -29,6 +29,7 @@ from synapse.types import UserID from synapse.util import Clock from tests import unittest +from tests.unittest import override_config class PresenceTestCase(unittest.HomeserverTestCase): @@ -95,3 +96,54 @@ class PresenceTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(self.presence_handler.set_state.call_count, 0) + + @override_config( + {"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}} + ) + def test_put_presence_over_ratelimit(self) -> None: + """ + Multiple PUTs to the status endpoint without sufficient delay will be rate limited. + """ + self.hs.config.server.presence_enabled = True + + body = {"presence": "here", "status_msg": "beep boop"} + channel = self.make_request( + "PUT", "/presence/%s/status" % (self.user_id,), body + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + + body = {"presence": "here", "status_msg": "beep boop"} + channel = self.make_request( + "PUT", "/presence/%s/status" % (self.user_id,), body + ) + + self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS) + self.assertEqual(self.presence_handler.set_state.call_count, 1) + + @override_config( + {"rc_presence": {"per_user": {"per_second": 0.1, "burst_count": 1}}} + ) + def test_put_presence_within_ratelimit(self) -> None: + """ + Multiple PUTs to the status endpoint with sufficient delay should all call set_state. + """ + self.hs.config.server.presence_enabled = True + + body = {"presence": "here", "status_msg": "beep boop"} + channel = self.make_request( + "PUT", "/presence/%s/status" % (self.user_id,), body + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + + # Advance time a sufficient amount to avoid rate limiting. + self.reactor.advance(30) + + body = {"presence": "here", "status_msg": "beep boop"} + channel = self.make_request( + "PUT", "/presence/%s/status" % (self.user_id,), body + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual(self.presence_handler.set_state.call_count, 2) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py
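The two presence tests above cover the new `rc_presence.per_user` setting, which rate-limits `PUT /presence/{user}/status` per user. A sketch of a client honouring that limit (placeholder URL and token; Matrix rate-limit errors carry `retry_after_ms` in the body):

```python
# Sketch: set presence, backing off once if the per-user limit is hit.
import time

import requests


def set_presence_with_backoff(base: str, user_id: str, token: str) -> None:
    url = f"{base}/_matrix/client/v3/presence/{user_id}/status"
    headers = {"Authorization": f"Bearer {token}"}
    body = {"presence": "online", "status_msg": "beep boop"}
    r = requests.put(url, headers=headers, json=body)
    if r.status_code == 429:
        time.sleep(r.json().get("retry_after_ms", 1000) / 1000)
        requests.put(url, headers=headers, json=body)
```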
index f98f3f77aa..708402b792 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py
@@ -20,20 +20,25 @@ # """Tests REST events for /profile paths.""" + import urllib.parse from http import HTTPStatus from typing import Any, Dict, Optional +from canonicaljson import encode_canonical_json + from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import login, profile, room from synapse.server import HomeServer +from synapse.storage.databases.main.profile import MAX_PROFILE_SIZE from synapse.types import UserID from synapse.util import Clock from tests import unittest +from tests.utils import USE_POSTGRES_FOR_TESTS class ProfileTestCase(unittest.HomeserverTestCase): @@ -479,6 +484,298 @@ class ProfileTestCase(unittest.HomeserverTestCase): # The client requested ?propagate=true, so it should have happened. self.assertEqual(channel.json_body.get(prop), "http://my.server/pic.gif") + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_get_missing_custom_field(self) -> None: + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + ) + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_get_missing_custom_field_invalid_field_name(self) -> None: + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/[custom_field]", + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_get_custom_field_rejects_bad_username(self) -> None: + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{urllib.parse.quote('@alice:')}/custom_field", + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field(self) -> None: + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + content={"custom_field": "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.json_body, {"custom_field": "test"}) + + # Overwriting the field should work. + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + content={"custom_field": "new_Value"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.json_body, {"custom_field": "new_Value"}) + + # Deleting the field should work. 
+ channel = self.make_request( + "DELETE", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + content={}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + ) + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_non_string(self) -> None: + """Non-string fields are supported for custom fields.""" + fields = { + "bool_field": True, + "array_field": ["test"], + "object_field": {"test": "test"}, + "numeric_field": 1, + "null_field": None, + } + + for key, value in fields.items(): + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: value}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + channel = self.make_request( + "GET", + f"/_matrix/client/v3/profile/{self.owner}", + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.json_body, {"displayname": "owner", **fields}) + + # Check getting individual fields works. + for key, value in fields.items(): + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + self.assertEqual(channel.json_body, {key: value}) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field_noauth(self) -> None: + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + content={"custom_field": "test"}, + ) + self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.MISSING_TOKEN) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field_size(self) -> None: + """ + Attempts to set a custom field name that is too long should get a 400 error. + """ + # Key is missing. + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/", + content={"": "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) + + # Single key is too large. + key = "c" * 500 + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.KEY_TOO_LARGE) + + channel = self.make_request( + "DELETE", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.KEY_TOO_LARGE) + + # Key doesn't match body. 
+ channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/custom_field", + content={"diff_key": "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field_profile_too_long(self) -> None: + """ + Attempts to set a custom field that would push the overall profile over the size limit should get a 400 error. + """ + # Get right to the boundary: + # len("displayname") + len("owner") + 5 = 21 for the displayname + # 1 + 65498 + 5 for key "a" = 65504 + # 2 braces, 1 comma + # 3 + 21 + 65504 = 65528 < 65536. + key = "a" + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: "a" * 65498}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Get the entire profile. + channel = self.make_request( + "GET", + f"/_matrix/client/v3/profile/{self.owner}", + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + canonical_json = encode_canonical_json(channel.json_body) + # 6 is the minimum number of bytes to store a value: 4 quotes, 1 colon, 1 comma, an empty key. + # Stay below that so we can prove we're at the boundary. + self.assertEqual(len(canonical_json), MAX_PROFILE_SIZE - 8) + + # Postgres stores JSONB with whitespace, while SQLite doesn't. + if USE_POSTGRES_FOR_TESTS: + ADDITIONAL_CHARS = 0 + else: + ADDITIONAL_CHARS = 1 + + # The next one should fail; note the value has a (JSON) length of 2. + key = "b" + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: "1" + "a" * ADDITIONAL_CHARS}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.PROFILE_TOO_LARGE) + + # Setting an avatar or (longer) display name should not work. + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/displayname", + content={"displayname": "owner12345678" + "a" * ADDITIONAL_CHARS}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.PROFILE_TOO_LARGE) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://foo/bar"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.PROFILE_TOO_LARGE) + + # Removing a single byte should work. + key = "b" + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: "" + "a" * ADDITIONAL_CHARS}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Finally, setting a field that already exists to a value of equal or shorter length should work. 
+ key = "a" + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/{key}", + content={key: ""}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field_displayname(self) -> None: + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/displayname", + content={"displayname": "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + displayname = self._get_displayname() + self.assertEqual(displayname, "test") + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field_avatar_url(self) -> None: + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/good"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + avatar_url = self._get_avatar_url() + self.assertEqual(avatar_url, "mxc://test/good") + + @unittest.override_config({"experimental_features": {"msc4133_enabled": True}}) + def test_set_custom_field_other(self) -> None: + """Setting someone else's profile field should fail""" + channel = self.make_request( + "PUT", + f"/_matrix/client/unstable/uk.tcpip.msc4133/profile/{self.other}/custom_field", + content={"custom_field": "test"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) + def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None: """Stores metadata about files in the database. diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py
index 694f143eff..d40efdfe1d 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py
@@ -64,7 +64,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self, reactor: ThreadedMemoryReactorClock, clock: Clock ) -> HomeServer: hs = super().make_homeserver(reactor, clock) - hs.get_send_email_handler()._sendmail = AsyncMock() return hs def test_POST_appservice_registration_valid(self) -> None: @@ -120,6 +119,34 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 401, msg=channel.result) + def test_POST_appservice_msc4190_enabled(self) -> None: + # With MSC4190 enabled, the registration should *not* return an access token + user_id = "@as_user_kermit:test" + as_token = "i_am_an_app_service" + + appservice = ApplicationService( + as_token, + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + msc4190_device_management=True, + ) + + self.hs.get_datastores().main.services_cache.append(appservice) + request_data = { + "username": "as_user_kermit", + "type": APP_SERVICE_REGISTRATION_TYPE, + } + + channel = self.make_request( + b"POST", self.url + b"?access_token=i_am_an_app_service", request_data + ) + + self.assertEqual(channel.code, 200, msg=channel.result) + det_data = {"user_id": user_id, "home_server": self.hs.hostname} + self.assertLessEqual(det_data.items(), channel.json_body.items()) + self.assertNotIn("access_token", channel.json_body) + def test_POST_bad_password(self) -> None: request_data = {"username": "kermit", "password": 666} channel = self.make_request(b"POST", self.url, request_data) @@ -593,155 +620,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): # with the stock config, we only expect the dummy flow self.assertCountEqual([["m.login.dummy"]], (f["stages"] for f in flows)) - - @unittest.override_config( - { - "public_baseurl": "https://test_server", - "enable_registration_captcha": True, - "user_consent": { - "version": "1", - "template_dir": "/", - "require_at_registration": True, - }, - "account_threepid_delegates": { - "msisdn": "https://id_server", - }, - "email": {"notif_from": "Synapse <synapse@example.com>"}, - } - ) - def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: - channel = self.make_request(b"POST", self.url, b"{}") - self.assertEqual(channel.code, 401, msg=channel.result) - flows = channel.json_body["flows"] - - self.assertCountEqual( - [ - ["m.login.recaptcha", "m.login.terms", "m.login.dummy"], - ["m.login.recaptcha", "m.login.terms", "m.login.email.identity"], - ["m.login.recaptcha", "m.login.terms", "m.login.msisdn"], - [ - "m.login.recaptcha", - "m.login.terms", - "m.login.msisdn", - "m.login.email.identity", - ], - ], - (f["stages"] for f in flows), - ) - - @unittest.override_config( - { - "public_baseurl": "https://test_server", - "registrations_require_3pid": ["email"], - "disable_msisdn_registration": True, - "email": { - "smtp_host": "mail_server", - "smtp_port": 2525, - "notif_from": "sender@host", - }, - } - ) - def test_advertised_flows_no_msisdn_email_required(self) -> None: - channel = self.make_request(b"POST", self.url, b"{}") - self.assertEqual(channel.code, 401, msg=channel.result) - flows = channel.json_body["flows"] - - # with the stock config, we expect all four combinations of 3pid - self.assertCountEqual( - [["m.login.email.identity"]], (f["stages"] for f in flows) - ) - - @unittest.override_config( - { - "request_token_inhibit_3pid_errors": True, - "public_baseurl": "https://test_server", - "email": { - "smtp_host": "mail_server", - "smtp_port": 2525, - "notif_from": 
"sender@host", - }, - } - ) - def test_request_token_existing_email_inhibit_error(self) -> None: - """Test that requesting a token via this endpoint doesn't leak existing - associations if configured that way. - """ - user_id = self.register_user("kermit", "monkey") - self.login("kermit", "monkey") - - email = "test@example.com" - - # Add a threepid - self.get_success( - self.hs.get_datastores().main.user_add_threepid( - user_id=user_id, - medium="email", - address=email, - validated_at=0, - added_at=0, - ) - ) - - channel = self.make_request( - "POST", - b"register/email/requestToken", - {"client_secret": "foobar", "email": email, "send_attempt": 1}, - ) - self.assertEqual(200, channel.code, channel.result) - - self.assertIsNotNone(channel.json_body.get("sid")) - - @unittest.override_config( - { - "public_baseurl": "https://test_server", - "email": { - "smtp_host": "mail_server", - "smtp_port": 2525, - "notif_from": "sender@host", - }, - } - ) - def test_reject_invalid_email(self) -> None: - """Check that bad emails are rejected""" - - # Test for email with multiple @ - channel = self.make_request( - "POST", - b"register/email/requestToken", - {"client_secret": "foobar", "email": "email@@email", "send_attempt": 1}, - ) - self.assertEqual(400, channel.code, channel.result) - # Check error to ensure that we're not erroring due to a bug in the test. - self.assertEqual( - channel.json_body, - {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, - ) - - # Test for email with no @ - channel = self.make_request( - "POST", - b"register/email/requestToken", - {"client_secret": "foobar", "email": "email", "send_attempt": 1}, - ) - self.assertEqual(400, channel.code, channel.result) - self.assertEqual( - channel.json_body, - {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, - ) - - # Test for super long email - email = "a@" + "a" * 1000 - channel = self.make_request( - "POST", - b"register/email/requestToken", - {"client_secret": "foobar", "email": email, "send_attempt": 1}, - ) - self.assertEqual(400, channel.code, channel.result) - self.assertEqual( - channel.json_body, - {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, - ) - + @override_config( { "inhibit_user_in_use_error": True, @@ -925,224 +804,6 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200, msg=channel.result) -class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): - servlets = [ - register.register_servlets, - synapse.rest.admin.register_servlets_for_client_rest_resource, - login.register_servlets, - sync.register_servlets, - account_validity.register_servlets, - account.register_servlets, - ] - - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() - - # Test for account expiring after a week and renewal emails being sent 2 - # days before expiry. - config["enable_registration"] = True - config["account_validity"] = { - "enabled": True, - "period": 604800000, # Time in ms for 1 week - "renew_at": 172800000, # Time in ms for 2 days - "renew_by_email_enabled": True, - "renew_email_subject": "Renew your account", - "account_renewed_html_path": "account_renewed.html", - "invalid_token_html_path": "invalid_token.html", - } - - # Email config. 
- - config["email"] = { - "enable_notifs": True, - "template_dir": os.path.abspath( - pkg_resources.resource_filename("synapse", "res/templates") - ), - "expiry_template_html": "notice_expiry.html", - "expiry_template_text": "notice_expiry.txt", - "notif_template_html": "notif_mail.html", - "notif_template_text": "notif_mail.txt", - "smtp_host": "127.0.0.1", - "smtp_port": 20, - "require_transport_security": False, - "smtp_user": None, - "smtp_pass": None, - "notif_from": "test@example.com", - } - - self.hs = self.setup_test_homeserver(config=config) - - async def sendmail(*args: Any, **kwargs: Any) -> None: - self.email_attempts.append((args, kwargs)) - - self.email_attempts: List[Tuple[Any, Any]] = [] - self.hs.get_send_email_handler()._sendmail = sendmail - - self.store = self.hs.get_datastores().main - - return self.hs - - def test_renewal_email(self) -> None: - self.email_attempts = [] - - (user_id, tok) = self.create_user() - - # Move 5 days forward. This should trigger a renewal email to be sent. - self.reactor.advance(datetime.timedelta(days=5).total_seconds()) - self.assertEqual(len(self.email_attempts), 1) - - # Retrieving the URL from the email is too much pain for now, so we - # retrieve the token from the DB. - renewal_token = self.get_success(self.store.get_renewal_token_for_user(user_id)) - url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token - channel = self.make_request(b"GET", url) - self.assertEqual(channel.code, 200, msg=channel.result) - - # Check that we're getting HTML back. - content_type = channel.headers.getRawHeaders(b"Content-Type") - self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) - - # Check that the HTML we're getting is the one we expect on a successful renewal. - expiration_ts = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - expected_html = self.hs.config.account_validity.account_validity_account_renewed_template.render( - expiration_ts=expiration_ts - ) - self.assertEqual( - channel.result["body"], expected_html.encode("utf8"), channel.result - ) - - # Move 1 day forward. Try to renew with the same token again. - url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token - channel = self.make_request(b"GET", url) - self.assertEqual(channel.code, 200, msg=channel.result) - - # Check that we're getting HTML back. - content_type = channel.headers.getRawHeaders(b"Content-Type") - self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) - - # Check that the HTML we're getting is the one we expect when reusing a - # token. The account expiration date should not have changed. - expected_html = self.hs.config.account_validity.account_validity_account_previously_renewed_template.render( - expiration_ts=expiration_ts - ) - self.assertEqual( - channel.result["body"], expected_html.encode("utf8"), channel.result - ) - - # Move 3 days forward. If the renewal failed, every authed request with - # our access token should be denied from now, otherwise they should - # succeed. - self.reactor.advance(datetime.timedelta(days=3).total_seconds()) - channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEqual(channel.code, 200, msg=channel.result) - - def test_renewal_invalid_token(self) -> None: - # Hit the renewal endpoint with an invalid token and check that it behaves as - # expected, i.e. that it responds with 404 Not Found and the correct HTML. 
- url = "/_matrix/client/unstable/account_validity/renew?token=123" - channel = self.make_request(b"GET", url) - self.assertEqual(channel.code, 404, msg=channel.result) - - # Check that we're getting HTML back. - content_type = channel.headers.getRawHeaders(b"Content-Type") - self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) - - # Check that the HTML we're getting is the one we expect when using an - # invalid/unknown token. - expected_html = ( - self.hs.config.account_validity.account_validity_invalid_token_template.render() - ) - self.assertEqual( - channel.result["body"], expected_html.encode("utf8"), channel.result - ) - - def test_manual_email_send(self) -> None: - self.email_attempts = [] - - (user_id, tok) = self.create_user() - channel = self.make_request( - b"POST", - "/_matrix/client/unstable/account_validity/send_mail", - access_token=tok, - ) - self.assertEqual(channel.code, 200, msg=channel.result) - - self.assertEqual(len(self.email_attempts), 1) - - def test_deactivated_user(self) -> None: - self.email_attempts = [] - - (user_id, tok) = self.create_user() - - request_data = { - "auth": { - "type": "m.login.password", - "user": user_id, - "password": "monkey", - }, - "erase": False, - } - channel = self.make_request( - "POST", "account/deactivate", request_data, access_token=tok - ) - self.assertEqual(channel.code, 200) - - self.reactor.advance(datetime.timedelta(days=8).total_seconds()) - - self.assertEqual(len(self.email_attempts), 0) - - def create_user(self) -> Tuple[str, str]: - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - # We need to manually add an email address otherwise the handler will do - # nothing. - now = self.hs.get_clock().time_msec() - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address="kermit@example.com", - validated_at=now, - added_at=now, - ) - ) - return user_id, tok - - def test_manual_email_send_expired_account(self) -> None: - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - - # We need to manually add an email address otherwise the handler will do - # nothing. - now = self.hs.get_clock().time_msec() - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address="kermit@example.com", - validated_at=now, - added_at=now, - ) - ) - - # Make the account expire. - self.reactor.advance(datetime.timedelta(days=8).total_seconds()) - - # Ignore all emails sent by the automatic background task and only focus on the - # ones sent manually. - self.email_attempts = [] - - # Test that we're still able to manually trigger a mail to be sent. - channel = self.make_request( - b"POST", - "/_matrix/client/unstable/account_validity/send_mail", - access_token=tok, - ) - self.assertEqual(channel.code, 200, msg=channel.result) - - self.assertEqual(len(self.email_attempts), 1) - - class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py
index 0ab754a11a..83a5cbdc15 100644 --- a/tests/rest/client/test_rendezvous.py +++ b/tests/rest/client/test_rendezvous.py
@@ -34,7 +34,6 @@ from tests import unittest from tests.unittest import override_config from tests.utils import HAS_AUTHLIB -msc3886_endpoint = "/_matrix/client/unstable/org.matrix.msc3886/rendezvous" msc4108_endpoint = "/_matrix/client/unstable/org.matrix.msc4108/rendezvous" @@ -54,17 +53,9 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase): } def test_disabled(self) -> None: - channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None) - self.assertEqual(channel.code, 404) channel = self.make_request("POST", msc4108_endpoint, {}, access_token=None) self.assertEqual(channel.code, 404) - @override_config({"experimental_features": {"msc3886_endpoint": "/asd"}}) - def test_msc3886_redirect(self) -> None: - channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None) - self.assertEqual(channel.code, 307) - self.assertEqual(channel.headers.getRawHeaders("Location"), ["/asd"]) - @unittest.skip_unless(HAS_AUTHLIB, "requires authlib") @override_config( { @@ -126,10 +117,11 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase): headers = dict(channel.headers.getAllRawHeaders()) self.assertIn(b"ETag", headers) self.assertIn(b"Expires", headers) + self.assertIn(b"Content-Length", headers) self.assertEqual(headers[b"Content-Type"], [b"application/json"]) self.assertEqual(headers[b"Access-Control-Allow-Origin"], [b"*"]) self.assertEqual(headers[b"Access-Control-Expose-Headers"], [b"etag"]) - self.assertEqual(headers[b"Cache-Control"], [b"no-store"]) + self.assertEqual(headers[b"Cache-Control"], [b"no-store, no-transform"]) self.assertEqual(headers[b"Pragma"], [b"no-cache"]) self.assertIn("url", channel.json_body) self.assertTrue(channel.json_body["url"].startswith("https://")) @@ -150,9 +142,10 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase): self.assertEqual(headers[b"ETag"], [etag]) self.assertIn(b"Expires", headers) self.assertEqual(headers[b"Content-Type"], [b"text/plain"]) + self.assertEqual(headers[b"Content-Length"], [b"7"]) self.assertEqual(headers[b"Access-Control-Allow-Origin"], [b"*"]) self.assertEqual(headers[b"Access-Control-Expose-Headers"], [b"etag"]) - self.assertEqual(headers[b"Cache-Control"], [b"no-store"]) + self.assertEqual(headers[b"Cache-Control"], [b"no-store, no-transform"]) self.assertEqual(headers[b"Pragma"], [b"no-cache"]) self.assertEqual(channel.text_body, "foo=bar") diff --git a/tests/rest/client/test_reporting.py b/tests/rest/client/test_reporting.py
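The rendezvous changes above tighten the caching assertions: the new no-transform directive forbids intermediaries from re-encoding the body, which would invalidate the Content-Length and ETag that the QR-code login flow depends on. A sketch of a Twisted resource emitting the asserted headers (illustrative only; this is not Synapse's rendezvous handler):

    from twisted.web.resource import Resource

    class RendezvousLikeResource(Resource):
        isLeaf = True

        def render_GET(self, request):
            body = b"foo=bar"
            request.setHeader(b"Content-Type", b"text/plain")
            request.setHeader(b"Content-Length", b"%d" % len(body))
            request.setHeader(b"Cache-Control", b"no-store, no-transform")
            request.setHeader(b"Pragma", b"no-cache")
            return body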
index 009deb9cb0..723553979f 100644 --- a/tests/rest/client/test_reporting.py +++ b/tests/rest/client/test_reporting.py
@@ -156,58 +156,31 @@ class ReportRoomTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as( self.other_user, tok=self.other_user_tok, is_public=True ) - self.report_path = ( - f"/_matrix/client/unstable/org.matrix.msc4151/rooms/{self.room_id}/report" - ) + self.report_path = f"/_matrix/client/v3/rooms/{self.room_id}/report" - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_reason_str(self) -> None: data = {"reason": "this makes me sad"} self._assert_status(200, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_no_reason(self) -> None: data = {"not_reason": "for typechecking"} self._assert_status(400, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_reason_nonstring(self) -> None: data = {"reason": 42} self._assert_status(400, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_reason_null(self) -> None: data = {"reason": None} self._assert_status(400, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_cannot_report_nonexistent_room(self) -> None: """ Tests that we don't accept event reports for rooms which do not exist. """ channel = self.make_request( "POST", - "/_matrix/client/unstable/org.matrix.msc4151/rooms/!bloop:example.org/report", + "/_matrix/client/v3/rooms/!bloop:example.org/report", {"reason": "i am very sad"}, access_token=self.other_user_tok, shorthand=False, diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
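The reporting changes track MSC4151's graduation: the endpoint moves from the unstable org.matrix.msc4151 prefix to the stable /_matrix/client/v3/rooms/{roomId}/report path, with no experimental flag required. A minimal client-side call against the stable path (the homeserver URL and token here are hypothetical):

    import json
    import urllib.parse
    import urllib.request

    def report_room(base_url: str, access_token: str, room_id: str, reason: str) -> None:
        """POST a room report to the stable v3 endpoint."""
        url = f"{base_url}/_matrix/client/v3/rooms/{urllib.parse.quote(room_id, safe='')}/report"
        req = urllib.request.Request(
            url,
            data=json.dumps({"reason": reason}).encode("utf-8"),
            headers={
                "Authorization": f"Bearer {access_token}",
                "Content-Type": "application/json",
            },
            method="POST",
        )
        urllib.request.urlopen(req)

    # report_room("https://example.com", "<token>", "!room:example.com", "this makes me sad")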
index c559dfda83..04442febb4 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py
@@ -4,7 +4,7 @@ # Copyright 2019 The Matrix.org Foundation C.I.C. # Copyright 2017 Vector Creations Ltd # Copyright 2014-2016 OpenMarket Ltd -# Copyright (C) 2023 New Vector, Ltd +# Copyright (C) 2023-2024 New Vector, Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as @@ -25,12 +25,11 @@ import json from http import HTTPStatus -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Union from unittest.mock import AsyncMock, Mock, call, patch from urllib import parse as urlparse from parameterized import param, parameterized -from typing_extensions import Literal from twisted.test.proto_helpers import MemoryReactor @@ -68,6 +67,7 @@ from tests.http.server._base import make_request_with_cancellation_test from tests.storage.test_stream import PaginationTestCase from tests.test_utils.event_injection import create_event from tests.unittest import override_config +from tests.utils import default_config PATH_PREFIX = b"/_matrix/client/api/v1" @@ -742,7 +742,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(33, channel.resource_usage.db_txn_count) + self.assertEqual(35, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -755,7 +755,7 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(35, channel.resource_usage.db_txn_count) + self.assertEqual(37, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id @@ -1337,17 +1337,13 @@ class RoomJoinTestCase(RoomBase): "POST", f"/join/{self.room1}", access_token=self.tok2 ) self.assertEqual(channel.code, 403) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" - ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") channel = self.make_request( "POST", f"/rooms/{self.room1}/join", access_token=self.tok2 ) self.assertEqual(channel.code, 403) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" - ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") def test_suspended_user_cannot_knock_on_room(self) -> None: # set the user as suspended @@ -1361,9 +1357,7 @@ class RoomJoinTestCase(RoomBase): shorthand=False, ) self.assertEqual(channel.code, 403) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" - ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") def test_suspended_user_cannot_invite_to_room(self) -> None: # set the user as suspended @@ -1376,9 +1370,24 @@ class RoomJoinTestCase(RoomBase): access_token=self.tok1, content={"user_id": self.user2}, ) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") + + def test_suspended_user_can_leave_room(self) -> None: + channel = self.make_request( + "POST", f"/join/{self.room1}", access_token=self.tok1 ) + self.assertEqual(channel.code, 200) + + # set the user as 
suspended + self.get_success(self.store.set_user_suspended_status(self.user1, True)) + + # leave room + channel = self.make_request( + "POST", + f"/rooms/{self.room1}/leave", + access_token=self.tok1, + ) + self.assertEqual(channel.code, 200) class RoomAppserviceTsParamTestCase(unittest.HomeserverTestCase): @@ -2291,6 +2300,141 @@ class RoomMessageFilterTestCase(RoomBase): self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) +class RoomDelayedEventTestCase(RoomBase): + """Tests delayed events.""" + + user_id = "@sid1:red" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.room_id = self.helper.create_room_as(self.user_id) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_send_delayed_invalid_event(self) -> None: + """Test sending a delayed event with invalid content.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertNotIn("org.matrix.msc4140.errcode", channel.json_body) + + def test_delayed_event_unsupported_by_default(self) -> None: + """Test that sending a delayed event is unsupported with the default config.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + "M_MAX_DELAY_UNSUPPORTED", + channel.json_body.get("org.matrix.msc4140.errcode"), + channel.json_body, + ) + + @unittest.override_config({"max_event_delay_duration": "1000"}) + def test_delayed_event_exceeds_max_delay(self) -> None: + """Test that sending a delayed event fails if its delay is longer than allowed.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + "M_MAX_DELAY_EXCEEDED", + channel.json_body.get("org.matrix.msc4140.errcode"), + channel.json_body, + ) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_delayed_event_with_negative_delay(self) -> None: + """Test that sending a delayed event fails if its delay is negative.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=-2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) + self.assertEqual( + Codes.INVALID_PARAM, channel.json_body["errcode"], channel.json_body + ) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_send_delayed_message_event(self) -> None: + """Test sending a valid delayed message event.""" + channel = self.make_request( + "PUT", + ( + "rooms/%s/send/m.room.message/mid1?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + @unittest.override_config({"max_event_delay_duration": "24h"}) + def test_send_delayed_state_event(self) -> None: + """Test sending a valid delayed state event.""" + channel = self.make_request( + "PUT", + ( + 
"rooms/%s/state/m.room.topic/?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"topic": "This is a topic"}, + ) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + @unittest.override_config( + { + "max_event_delay_duration": "24h", + "rc_message": {"per_second": 1, "burst_count": 2}, + } + ) + def test_add_delayed_event_ratelimit(self) -> None: + """Test that requests to schedule new delayed events are ratelimited by a RateLimiter, + which ratelimits them correctly, including by not limiting when the requester is + exempt from ratelimiting. + """ + + # Test that new delayed events are correctly ratelimited. + args = ( + "POST", + ( + "rooms/%s/send/m.room.message?org.matrix.msc4140.delay=2000" + % self.room_id + ).encode("ascii"), + {"body": "test", "msgtype": "m.text"}, + ) + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.TOO_MANY_REQUESTS, channel.code, channel.result) + + # Add the current user to the ratelimit overrides, allowing them no ratelimiting. + self.get_success( + self.hs.get_datastores().main.set_ratelimit_for_user(self.user_id, 0, 0) + ) + + # Test that the new delayed events aren't ratelimited anymore. + channel = self.make_request(*args) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) + + class RoomSearchTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -2457,6 +2601,11 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): tok=self.token, ) + def default_config(self) -> JsonDict: + config = default_config("test") + config["room_list_publication_rules"] = [{"action": "allow"}] + return config + def make_public_rooms_request( self, room_types: Optional[List[Union[str, None]]], @@ -2794,6 +2943,68 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): self.assertEqual(event_content.get("reason"), reason, channel.result) +class RoomForgottenTestCase(unittest.HomeserverTestCase): + """ + Test forget/forgotten rooms + """ + + servlets = [ + synapse.rest.admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + + def test_room_not_forgotten_after_unban(self) -> None: + """ + Test what happens when someone is banned from a room, they forget the room, and + some time later are unbanned. + + Currently, when they are unbanned, the room isn't forgotten anymore which may or + may not be expected. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # User1 is banned and forgets the room + self.helper.ban(room_id, src=user2_id, targ=user1_id, tok=user2_tok) + # User1 forgets the room + self.get_success(self.store.forget(user1_id, room_id)) + + # The room should show up as forgotten + forgotten_room_ids = self.get_success( + self.store.get_forgotten_rooms_for_user(user1_id) + ) + self.assertIncludes(forgotten_room_ids, {room_id}, exact=True) + + # Unban user1 + self.helper.change_membership( + room=room_id, + src=user2_id, + targ=user1_id, + membership=Membership.LEAVE, + tok=user2_tok, + ) + + # Room is no longer forgotten because it's a new membership + # + # XXX: Is this how we actually want it to behave? It seems like ideally, the + # room forgotten status should only be reset when the user decides to join again + # (or is invited/knocks). This way the room remains forgotten for any ban/leave + # transitions. + forgotten_room_ids = self.get_success( + self.store.get_forgotten_rooms_for_user(user1_id) + ) + self.assertIncludes(forgotten_room_ids, set(), exact=True) + + class LabelsTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -3577,191 +3788,6 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) -class ThreepidInviteTestCase(unittest.HomeserverTestCase): - servlets = [ - admin.register_servlets, - login.register_servlets, - room.register_servlets, - ] - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.user_id = self.register_user("thomas", "hackme") - self.tok = self.login("thomas", "hackme") - - self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - - def test_threepid_invite_spamcheck_deprecated(self) -> None: - """ - Test allowing/blocking threepid invites with a spam-check module. - - In this test, we use the deprecated API in which callbacks return a bool. - """ - # Mock a few functions to prevent the test from failing due to failing to talk to - # a remote IS. We keep the mock for make_and_store_3pid_invite around so we - # can check its call_count later on during the test. - make_invite_mock = AsyncMock(return_value=(Mock(event_id="abc"), 0)) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[method-assign] - self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[method-assign] - return_value=None, - ) - - # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it - # allow everything for now. - # `spec` argument is needed for this function mock to have `__qualname__`, which - # is needed for `Measure` metrics buried in SpamChecker. - mock = AsyncMock(return_value=True, spec=lambda *x: None) - self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append( - mock - ) - - # Send a 3PID invite into the room and check that it succeeded. 
- email_to_invite = "teresa@example.com" - channel = self.make_request( - method="POST", - path="/rooms/" + self.room_id + "/invite", - content={ - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": email_to_invite, - }, - access_token=self.tok, - ) - self.assertEqual(channel.code, 200) - - # Check that the callback was called with the right params. - mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) - - # Check that the call to send the invite was made. - make_invite_mock.assert_called_once() - - # Now change the return value of the callback to deny any invite and test that - # we can't send the invite. - mock.return_value = False - channel = self.make_request( - method="POST", - path="/rooms/" + self.room_id + "/invite", - content={ - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": email_to_invite, - }, - access_token=self.tok, - ) - self.assertEqual(channel.code, 403) - - # Also check that it stopped before calling _make_and_store_3pid_invite. - make_invite_mock.assert_called_once() - - def test_threepid_invite_spamcheck(self) -> None: - """ - Test allowing/blocking threepid invites with a spam-check module. - - In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. - """ - # Mock a few functions to prevent the test from failing due to failing to talk to - # a remote IS. We keep the mock for make_and_store_3pid_invite around so we - # can check its call_count later on during the test. - make_invite_mock = AsyncMock(return_value=(Mock(event_id="abc"), 0)) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[method-assign] - self.hs.get_identity_handler().lookup_3pid = AsyncMock( # type: ignore[method-assign] - return_value=None, - ) - - # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it - # allow everything for now. - # `spec` argument is needed for this function mock to have `__qualname__`, which - # is needed for `Measure` metrics buried in SpamChecker. - mock = AsyncMock( - return_value=synapse.module_api.NOT_SPAM, - spec=lambda *x: None, - ) - self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append( - mock - ) - - # Send a 3PID invite into the room and check that it succeeded. - email_to_invite = "teresa@example.com" - channel = self.make_request( - method="POST", - path="/rooms/" + self.room_id + "/invite", - content={ - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": email_to_invite, - }, - access_token=self.tok, - ) - self.assertEqual(channel.code, 200) - - # Check that the callback was called with the right params. - mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) - - # Check that the call to send the invite was made. - make_invite_mock.assert_called_once() - - # Now change the return value of the callback to deny any invite and test that - # we can't send the invite. 
We pick an arbitrary error code to be able to check - # that the same code has been returned - mock.return_value = Codes.CONSENT_NOT_GIVEN - channel = self.make_request( - method="POST", - path="/rooms/" + self.room_id + "/invite", - content={ - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": email_to_invite, - }, - access_token=self.tok, - ) - self.assertEqual(channel.code, 403) - self.assertEqual(channel.json_body["errcode"], Codes.CONSENT_NOT_GIVEN) - - # Also check that it stopped before calling _make_and_store_3pid_invite. - make_invite_mock.assert_called_once() - - # Run variant with `Tuple[Codes, dict]`. - mock.return_value = (Codes.EXPIRED_ACCOUNT, {"field": "value"}) - channel = self.make_request( - method="POST", - path="/rooms/" + self.room_id + "/invite", - content={ - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": email_to_invite, - }, - access_token=self.tok, - ) - self.assertEqual(channel.code, 403) - self.assertEqual(channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT) - self.assertEqual(channel.json_body["field"], "value") - - # Also check that it stopped before calling _make_and_store_3pid_invite. - make_invite_mock.assert_called_once() - - def test_400_missing_param_without_id_access_token(self) -> None: - """ - Test that a 3pid invite request returns 400 M_MISSING_PARAM - if we do not include id_access_token. - """ - channel = self.make_request( - method="POST", - path="/rooms/" + self.room_id + "/invite", - content={ - "id_server": "example.com", - "medium": "email", - "address": "teresa@example.com", - }, - access_token=self.tok, - ) - self.assertEqual(channel.code, 400) - self.assertEqual(channel.json_body["errcode"], "M_MISSING_PARAM") - - class TimestampLookupTestCase(unittest.HomeserverTestCase): servlets = [ admin.register_servlets, @@ -3836,10 +3862,25 @@ class UserSuspensionTests(unittest.HomeserverTestCase): self.user2 = self.register_user("teresa", "hackme") self.tok2 = self.login("teresa", "hackme") - self.room1 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) + self.admin = self.register_user("admin", "pass", True) + self.admin_tok = self.login("admin", "pass") + + self.room1 = self.helper.create_room_as( + room_creator=self.user1, tok=self.tok1, room_version="11" + ) self.store = hs.get_datastores().main - def test_suspended_user_cannot_send_message_to_room(self) -> None: + self.room2 = self.helper.create_room_as( + room_creator=self.user1, is_public=False, tok=self.tok1 + ) + self.helper.send_state( + self.room2, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=self.tok1, + ) + + def test_suspended_user_cannot_send_message_to_public_room(self) -> None: # set the user as suspended self.get_success(self.store.set_user_suspended_status(self.user1, True)) @@ -3849,9 +3890,25 @@ class UserSuspensionTests(unittest.HomeserverTestCase): access_token=self.tok1, content={"body": "hello", "msgtype": "m.text"}, ) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") + + def test_suspended_user_cannot_send_message_to_encrypted_room(self) -> None: + channel = self.make_request( + "PUT", + f"/_synapse/admin/v1/suspend/{self.user1}", + {"suspend": True}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body, 
{f"user_{self.user1}_suspended": True}) + + channel = self.make_request( + "PUT", + f"/rooms/{self.room2}/send/m.room.encrypted/1", + access_token=self.tok1, + content={}, ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") def test_suspended_user_cannot_change_profile_data(self) -> None: # set the user as suspended @@ -3864,9 +3921,7 @@ class UserSuspensionTests(unittest.HomeserverTestCase): content={"avatar_url": "mxc://matrix.org/wefh34uihSDRGhw34"}, shorthand=False, ) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" - ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") channel2 = self.make_request( "PUT", @@ -3875,9 +3930,7 @@ class UserSuspensionTests(unittest.HomeserverTestCase): content={"displayname": "something offensive"}, shorthand=False, ) - self.assertEqual( - channel2.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" - ) + self.assertEqual(channel2.json_body["errcode"], "M_USER_SUSPENDED") def test_suspended_user_cannot_redact_messages_other_than_their_own(self) -> None: # first user sends message @@ -3911,9 +3964,7 @@ class UserSuspensionTests(unittest.HomeserverTestCase): content={"reason": "bogus"}, shorthand=False, ) - self.assertEqual( - channel.json_body["errcode"], "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" - ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") # but can redact their own channel = self.make_request( @@ -3924,3 +3975,244 @@ class UserSuspensionTests(unittest.HomeserverTestCase): shorthand=False, ) self.assertEqual(channel.code, 200) + + channel = self.make_request( + "PUT", + f"/_matrix/client/v3/rooms/{self.room1}/send/m.room.redaction/3456346", + access_token=self.tok1, + content={"reason": "bogus", "redacts": event_id}, + shorthand=False, + ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") + + channel = self.make_request( + "PUT", + f"/_matrix/client/v3/rooms/{self.room1}/send/m.room.redaction/3456346", + access_token=self.tok1, + content={"reason": "bogus", "redacts": event_id2}, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + + def test_suspended_user_cannot_ban_others(self) -> None: + # user to ban joins room user1 created + self.make_request("POST", f"/rooms/{self.room1}/join", access_token=self.tok2) + + # suspend user1 + self.get_success(self.store.set_user_suspended_status(self.user1, True)) + + # user1 tries to ban other user while suspended + channel = self.make_request( + "POST", + f"/_matrix/client/v3/rooms/{self.room1}/ban", + access_token=self.tok1, + content={"reason": "spite", "user_id": self.user2}, + shorthand=False, + ) + self.assertEqual(channel.json_body["errcode"], "M_USER_SUSPENDED") + + # un-suspend user1 + self.get_success(self.store.set_user_suspended_status(self.user1, False)) + + # ban now goes through + channel = self.make_request( + "POST", + f"/_matrix/client/v3/rooms/{self.room1}/ban", + access_token=self.tok1, + content={"reason": "spite", "user_id": self.user2}, + shorthand=False, + ) + self.assertEqual(channel.code, 200) + + +class RoomParticipantTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + room.register_servlets, + profile.register_servlets, + admin.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user1 = self.register_user("thomas", "hackme") + self.tok1 = self.login("thomas", "hackme") + + self.user2 = self.register_user("teresa", "hackme") + self.tok2 = 
self.login("teresa", "hackme") + + self.room1 = self.helper.create_room_as( + room_creator=self.user1, + tok=self.tok1, + # Allow user2 to send state events into the room. + extra_content={ + "power_level_content_override": { + "state_default": 0, + }, + }, + ) + self.store = hs.get_datastores().main + + @parameterized.expand( + [ + # Should record participation. + param( + is_state=False, + event_type="m.room.message", + event_content={ + "msgtype": "m.text", + "body": "I am engaging in this room", + }, + record_participation=True, + ), + param( + is_state=False, + event_type="m.room.encrypted", + event_content={ + "algorithm": "m.megolm.v1.aes-sha2", + "ciphertext": "AwgAEnACgAkLmt6qF84IK++J7UDH2Za1YVchHyprqTqsg...", + "device_id": "RJYKSTBOIE", + "sender_key": "IlRMeOPX2e0MurIyfWEucYBRVOEEUMrOHqn/8mLqMjA", + "session_id": "X3lUlvLELLYxeTx4yOVu6UDpasGEVO0Jbu+QFnm0cKQ", + }, + record_participation=True, + ), + # Should not record participation. + param( + is_state=False, + event_type="m.sticker", + event_content={ + "body": "My great sticker", + "info": {}, + "url": "mxc://unused/mxcurl", + }, + record_participation=False, + ), + # An invalid **state event** with type `m.room.message` + param( + is_state=True, + event_type="m.room.message", + event_content={ + "msgtype": "m.text", + "body": "I am engaging in this room", + }, + record_participation=False, + ), + # An invalid **state event** with type `m.room.encrypted` + # Note: this may become valid in the future with encrypted state, though we + # still may not want to consider it grounds for marking a user as participating. + param( + is_state=True, + event_type="m.room.encrypted", + event_content={ + "algorithm": "m.megolm.v1.aes-sha2", + "ciphertext": "AwgAEnACgAkLmt6qF84IK++J7UDH2Za1YVchHyprqTqsg...", + "device_id": "RJYKSTBOIE", + "sender_key": "IlRMeOPX2e0MurIyfWEucYBRVOEEUMrOHqn/8mLqMjA", + "session_id": "X3lUlvLELLYxeTx4yOVu6UDpasGEVO0Jbu+QFnm0cKQ", + }, + record_participation=False, + ), + ] + ) + def test_sending_message_records_participation( + self, + is_state: bool, + event_type: str, + event_content: JsonDict, + record_participation: bool, + ) -> None: + """ + Test that sending an various events into a room causes the user to + appropriately marked or not marked as a participant in that room. 
+ """ + self.helper.join(self.room1, self.user2, tok=self.tok2) + + # user has not sent any messages, so should not be a participant + participant = self.get_success( + self.store.get_room_participation(self.user2, self.room1) + ) + self.assertFalse(participant) + + # send an event into the room + if is_state: + # send a state event + self.helper.send_state( + self.room1, + event_type, + body=event_content, + tok=self.tok2, + ) + else: + # send a non-state event + self.helper.send_event( + self.room1, + event_type, + content=event_content, + tok=self.tok2, + ) + + # check whether the user has been marked as a participant + participant = self.get_success( + self.store.get_room_participation(self.user2, self.room1) + ) + self.assertEqual(participant, record_participation) + + @parameterized.expand( + [ + param( + event_type="m.room.message", + event_content={ + "msgtype": "m.text", + "body": "I am engaging in this room", + }, + ), + param( + event_type="m.room.encrypted", + event_content={ + "algorithm": "m.megolm.v1.aes-sha2", + "ciphertext": "AwgAEnACgAkLmt6qF84IK++J7UDH2Za1YVchHyprqTqsg...", + "device_id": "RJYKSTBOIE", + "sender_key": "IlRMeOPX2e0MurIyfWEucYBRVOEEUMrOHqn/8mLqMjA", + "session_id": "X3lUlvLELLYxeTx4yOVu6UDpasGEVO0Jbu+QFnm0cKQ", + }, + ), + ] + ) + def test_sending_event_and_leaving_does_not_record_participation( + self, + event_type: str, + event_content: JsonDict, + ) -> None: + """ + Test that sending an event into a room that should mark a user as a + participant, but then leaving the room, results in the user no longer + be marked as a participant in that room. + """ + self.helper.join(self.room1, self.user2, tok=self.tok2) + + # user has not sent any messages, so should not be a participant + participant = self.get_success( + self.store.get_room_participation(self.user2, self.room1) + ) + self.assertFalse(participant) + + # sending a message should now mark user as participant + self.helper.send_event( + self.room1, + event_type, + content=event_content, + tok=self.tok2, + ) + participant = self.get_success( + self.store.get_room_participation(self.user2, self.room1) + ) + self.assertTrue(participant) + + # leave the room + self.helper.leave(self.room1, self.user2, tok=self.tok2) + + # user should no longer be considered a participant + participant = self.get_success( + self.store.get_room_participation(self.user2, self.room1) + ) + self.assertFalse(participant) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py
index 2287f233b4..b406a578f0 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py
@@ -88,35 +88,6 @@ class RoomTestCase(_ShadowBannedBase): ) self.assertEqual(invited_rooms, []) - def test_invite_3pid(self) -> None: - """Ensure that a 3PID invite does not attempt to contact the identity server.""" - identity_handler = self.hs.get_identity_handler() - identity_handler.lookup_3pid = Mock( # type: ignore[method-assign] - side_effect=AssertionError("This should not get called") - ) - - # The create works fine. - room_id = self.helper.create_room_as( - self.banned_user_id, tok=self.banned_access_token - ) - - # Inviting the user completes successfully. - channel = self.make_request( - "POST", - "/rooms/%s/invite" % (room_id,), - { - "id_server": "test", - "medium": "email", - "address": "test@test.test", - "id_access_token": "anytoken", - }, - access_token=self.banned_access_token, - ) - self.assertEqual(200, channel.code, channel.result) - - # This should have raised an error earlier, but double check this wasn't called. - identity_handler.lookup_3pid.assert_not_called() - def test_create_room(self) -> None: """Invitations during a room creation should be discarded, but the room still gets created.""" # The room creation is successful. diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py
index 63df31ec75..c52a5b2e79 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py
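Context for the sync change below: typing notifications are served from an ephemeral, in-memory stream whose counter can reset (for instance on restart), and sync tokens minted before a reset no longer compare meaningfully against it. A toy illustration of the comparison involved (hypothetical numbers, not Synapse code):

    # A sync token records a last-seen position per stream; an item counts as
    # "new" for a client iff its stream position exceeds the token's position.
    token_position = 7   # client's next_batch, minted before the reset
    event_position = 9   # typing data the client has in fact already seen
    is_new = event_position > token_position  # True: re-delivered as "new"
    # After the stream counter resets, fresh items get small positions again,
    # so comparisons against pre-reset tokens are meaningless; the test below
    # therefore resets the typing handler before long-polling again.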
@@ -282,22 +282,33 @@ class SyncTypingTests(unittest.HomeserverTestCase): self.assertEqual(200, channel.code) next_batch = channel.json_body["next_batch"] - # This should time out! But it does not, because our stream token is - # ahead, and therefore it's saying the typing (that we've actually - # already seen) is new, since it's got a token above our new, now-reset - # stream token. - channel = self.make_request("GET", sync_url % (access_token, next_batch)) - self.assertEqual(200, channel.code) - next_batch = channel.json_body["next_batch"] - # Clear the typing information, so that it doesn't think everything is - # in the future. + # in the future. This happens automatically when the typing stream + # resets. typing._reset() - # Now it SHOULD fail as it never completes! + # Nothing new, so we time out. with self.assertRaises(TimedOutException): self.make_request("GET", sync_url % (access_token, next_batch)) + # Sync and start typing again. + sync_channel = self.make_request( + "GET", sync_url % (access_token, next_batch), await_result=False + ) + self.assertFalse(sync_channel.is_finished()) + + channel = self.make_request( + "PUT", + typing_url % (room, other_user_id, other_access_token), + b'{"typing": true, "timeout": 30000}', + ) + self.assertEqual(200, channel.code) + + # Sync should now return. + sync_channel.await_result() + self.assertEqual(200, sync_channel.code) + next_batch = sync_channel.json_body["next_batch"] + class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin): servlets = [ diff --git a/tests/rest/client/test_tags.py b/tests/rest/client/test_tags.py new file mode 100644
index 0000000000..5d596409e1 --- /dev/null +++ b/tests/rest/client/test_tags.py
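The new test file below exercises only the PUT side of the tags API. For orientation, the read side defined by the Matrix client-server spec returns the stored map, so a companion request in the same style as these tests would look roughly like this (a sketch reusing the test's user1_id/room_id/user1_tok names; not part of the new file):

    channel = self.make_request(
        "GET",
        f"/user/{user1_id}/rooms/{room_id}/tags",
        access_token=user1_tok,
    )
    assert channel.code == HTTPStatus.OK
    # Expected shape per the spec: {"tags": {"test_tag": {"order": 0.5}}}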
@@ -0,0 +1,95 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# + +"""Tests REST events for /tags paths.""" + +from http import HTTPStatus + +import synapse.rest.admin +from synapse.rest.client import login, room, tags + +from tests import unittest + + +class RoomTaggingTestCase(unittest.HomeserverTestCase): + """Tests /user/$user_id/rooms/$room_id/tags/$tag REST API.""" + + servlets = [ + room.register_servlets, + tags.register_servlets, + login.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + ] + + def test_put_tag_checks_room_membership(self) -> None: + """ + Test that a user can add a tag to a room if they are a member of the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + tag = "test_tag" + + # Make the request + channel = self.make_request( + "PUT", + f"/user/{user1_id}/rooms/{room_id}/tags/{tag}", + content={"order": 0.5}, + access_token=user1_tok, + ) + # Check that the request was successful + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + def test_put_tag_fails_if_not_in_room(self) -> None: + """ + Test that a user cannot add a tag to a room if they are not a member of the + room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + # Create the room with user2 (user1 has no membership in the room) + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + tag = "test_tag" + + # Make the request + channel = self.make_request( + "PUT", + f"/user/{user1_id}/rooms/{room_id}/tags/{tag}", + content={"order": 0.5}, + access_token=user1_tok, + ) + # Check that the request failed with the correct error + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) + + def test_put_tag_fails_if_room_does_not_exist(self) -> None: + """ + Test that a user cannot add a tag to a room if the room doesn't exist (and + therefore no membership in the room). + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + room_id = "!nonexistent:test" + tag = "test_tag" + + # Make the request + channel = self.make_request( + "PUT", + f"/user/{user1_id}/rooms/{room_id}/tags/{tag}", + content={"order": 0.5}, + access_token=user1_tok, + ) + # Check that the request failed with the correct error + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index d10df1a90f..f02317533e 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py
@@ -915,162 +915,3 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): # Check that the mock was called with the right room ID self.assertEqual(args[1], self.room_id) - - def test_on_threepid_bind(self) -> None: - """Tests that the on_threepid_bind module callback is called correctly after - associating a 3PID to an account. - """ - # Register a mocked callback. - threepid_bind_mock = AsyncMock(return_value=None) - third_party_rules = self.hs.get_module_api_callbacks().third_party_event_rules - third_party_rules._on_threepid_bind_callbacks.append(threepid_bind_mock) - - # Register an admin user. - self.register_user("admin", "password", admin=True) - admin_tok = self.login("admin", "password") - - # Also register a normal user we can modify. - user_id = self.register_user("user", "password") - - # Add a 3PID to the user. - channel = self.make_request( - "PUT", - "/_synapse/admin/v2/users/%s" % user_id, - { - "threepids": [ - { - "medium": "email", - "address": "foo@example.com", - }, - ], - }, - access_token=admin_tok, - ) - - # Check that the shutdown was blocked - self.assertEqual(channel.code, 200, channel.json_body) - - # Check that the mock was called once. - threepid_bind_mock.assert_called_once() - args = threepid_bind_mock.call_args[0] - - # Check that the mock was called with the right parameters - self.assertEqual(args, (user_id, "email", "foo@example.com")) - - def test_on_add_and_remove_user_third_party_identifier(self) -> None: - """Tests that the on_add_user_third_party_identifier and - on_remove_user_third_party_identifier module callbacks are called - just before associating and removing a 3PID to/from an account. - """ - # Pretend to be a Synapse module and register both callbacks as mocks. - on_add_user_third_party_identifier_callback_mock = AsyncMock(return_value=None) - on_remove_user_third_party_identifier_callback_mock = AsyncMock( - return_value=None - ) - self.hs.get_module_api().register_third_party_rules_callbacks( - on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock, - on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, - ) - - # Register an admin user. - self.register_user("admin", "password", admin=True) - admin_tok = self.login("admin", "password") - - # Also register a normal user we can modify. - user_id = self.register_user("user", "password") - - # Add a 3PID to the user. - channel = self.make_request( - "PUT", - "/_synapse/admin/v2/users/%s" % user_id, - { - "threepids": [ - { - "medium": "email", - "address": "foo@example.com", - }, - ], - }, - access_token=admin_tok, - ) - - # Check that the mocked add callback was called with the appropriate - # 3PID details. - self.assertEqual(channel.code, 200, channel.json_body) - on_add_user_third_party_identifier_callback_mock.assert_called_once() - args = on_add_user_third_party_identifier_callback_mock.call_args[0] - self.assertEqual(args, (user_id, "email", "foo@example.com")) - - # Now remove the 3PID from the user - channel = self.make_request( - "PUT", - "/_synapse/admin/v2/users/%s" % user_id, - { - "threepids": [], - }, - access_token=admin_tok, - ) - - # Check that the mocked remove callback was called with the appropriate - # 3PID details. 
- self.assertEqual(channel.code, 200, channel.json_body) - on_remove_user_third_party_identifier_callback_mock.assert_called_once() - args = on_remove_user_third_party_identifier_callback_mock.call_args[0] - self.assertEqual(args, (user_id, "email", "foo@example.com")) - - def test_on_remove_user_third_party_identifier_is_called_on_deactivate( - self, - ) -> None: - """Tests that the on_remove_user_third_party_identifier module callback is called - when a user is deactivated and their third-party ID associations are deleted. - """ - # Pretend to be a Synapse module and register both callbacks as mocks. - on_remove_user_third_party_identifier_callback_mock = AsyncMock( - return_value=None - ) - self.hs.get_module_api().register_third_party_rules_callbacks( - on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, - ) - - # Register an admin user. - self.register_user("admin", "password", admin=True) - admin_tok = self.login("admin", "password") - - # Also register a normal user we can modify. - user_id = self.register_user("user", "password") - - # Add a 3PID to the user. - channel = self.make_request( - "PUT", - "/_synapse/admin/v2/users/%s" % user_id, - { - "threepids": [ - { - "medium": "email", - "address": "foo@example.com", - }, - ], - }, - access_token=admin_tok, - ) - self.assertEqual(channel.code, 200, channel.json_body) - - # Check that the mock was not called on the act of adding a third-party ID. - on_remove_user_third_party_identifier_callback_mock.assert_not_called() - - # Now deactivate the user. - channel = self.make_request( - "PUT", - "/_synapse/admin/v2/users/%s" % user_id, - { - "deactivated": True, - }, - access_token=admin_tok, - ) - - # Check that the mocked remove callback was called with the appropriate - # 3PID details. - self.assertEqual(channel.code, 200, channel.json_body) - on_remove_user_third_party_identifier_callback_mock.assert_called_once() - args = on_remove_user_third_party_identifier_callback_mock.call_args[0] - self.assertEqual(args, (user_id, "email", "foo@example.com")) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py
index e43140720d..280486da08 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py
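Besides reflowing assertion messages, the utils.py change below adds a send_read_receipt helper to RestHelper. Inside a HomeserverTestCase it would be used along these lines (a sketch; room_id, tok1 and tok2 are placeholder names):

    # Send a message as one user, then mark it read as another.
    res = self.helper.send(room_id, body="hello", tok=tok1)
    self.helper.send_read_receipt(room_id, res["event_id"], tok=tok2)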
@@ -31,6 +31,7 @@ from typing import ( AnyStr, Dict, Iterable, + Literal, Mapping, MutableMapping, Optional, @@ -40,12 +41,11 @@ from typing import ( from urllib.parse import urlencode import attr -from typing_extensions import Literal from twisted.test.proto_helpers import MemoryReactorClock from twisted.web.server import Site -from synapse.api.constants import Membership +from synapse.api.constants import Membership, ReceiptTypes from synapse.api.errors import Codes from synapse.server import HomeServer from synapse.types import JsonDict @@ -330,22 +330,24 @@ class RestHelper: data, ) - assert ( - channel.code == expect_code - ), "Expected: %d, got: %d, PUT %s -> resp: %r" % ( - expect_code, - channel.code, - path, - channel.result["body"], + assert channel.code == expect_code, ( + "Expected: %d, got: %d, PUT %s -> resp: %r" + % ( + expect_code, + channel.code, + path, + channel.result["body"], + ) ) if expect_errcode: - assert ( - str(channel.json_body["errcode"]) == expect_errcode - ), "Expected: %r, got: %r, resp: %r" % ( - expect_errcode, - channel.json_body["errcode"], - channel.result["body"], + assert str(channel.json_body["errcode"]) == expect_errcode, ( + "Expected: %r, got: %r, resp: %r" + % ( + expect_errcode, + channel.json_body["errcode"], + channel.result["body"], + ) ) if expect_additional_fields is not None: @@ -354,13 +356,14 @@ class RestHelper: expect_key, channel.json_body, ) - assert ( - channel.json_body[expect_key] == expect_value - ), "Expected: %s at %s, got: %s, resp: %s" % ( - expect_value, - expect_key, - channel.json_body[expect_key], - channel.json_body, + assert channel.json_body[expect_key] == expect_value, ( + "Expected: %s at %s, got: %s, resp: %s" + % ( + expect_value, + expect_key, + channel.json_body[expect_key], + channel.json_body, + ) ) self.auth_user_id = temp_id @@ -545,7 +548,7 @@ class RestHelper: room_id: str, event_type: str, body: Dict[str, Any], - tok: Optional[str], + tok: Optional[str] = None, expect_code: int = HTTPStatus.OK, state_key: str = "", ) -> JsonDict: @@ -713,9 +716,9 @@ class RestHelper: "/login", content={"type": "m.login.token", "token": login_token}, ) - assert ( - channel.code == expected_status - ), f"unexpected status in response: {channel.code}" + assert channel.code == expected_status, ( + f"unexpected status in response: {channel.code}" + ) return channel.json_body def auth_via_oidc( @@ -886,7 +889,7 @@ class RestHelper: "GET", uri, ) - assert channel.code == 302 + assert channel.code == 302, f"Expected 302 for {uri}, got {channel.code}" # hit the redirect url again with the right Host header, which should now issue # a cookie and redirect to the SSO provider. 
@@ -898,17 +901,18 @@ class RestHelper: location = get_location(channel) parts = urllib.parse.urlsplit(location) + next_uri = urllib.parse.urlunsplit(("", "") + parts[2:]) channel = make_request( self.reactor, self.site, "GET", - urllib.parse.urlunsplit(("", "") + parts[2:]), + next_uri, custom_headers=[ ("Host", parts[1]), ], ) - assert channel.code == 302 + assert channel.code == 302, f"Expected 302 for {next_uri}, got {channel.code}" channel.extract_cookies(cookies) return get_location(channel) @@ -944,3 +948,15 @@ class RestHelper: assert len(p.links) == 1, "not exactly one link in confirmation page" oauth_uri = p.links[0] return oauth_uri + + def send_read_receipt(self, room_id: str, event_id: str, *, tok: str) -> None: + """Send a read receipt into the room at the given event""" + channel = make_request( + self.reactor, + self.site, + method="POST", + path=f"/rooms/{room_id}/receipt/{ReceiptTypes.READ}/{event_id}", + content={}, + access_token=tok, + ) + assert channel.code == HTTPStatus.OK, channel.text_body diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py
index 72205c6bb3..26453f70dd 100644 --- a/tests/rest/media/test_domain_blocking.py +++ b/tests/rest/media/test_domain_blocking.py
@@ -61,6 +61,7 @@ class MediaDomainBlockingTests(unittest.HomeserverTestCase): time_now_ms=clock.time_msec(), upload_name="test.png", filesystem_id=file_id, + sha256=file_id, ) ) @@ -91,7 +92,8 @@ class MediaDomainBlockingTests(unittest.HomeserverTestCase): { # Disable downloads from a domain we won't be requesting downloads from. # This proves we haven't broken anything. - "prevent_media_downloads_from": ["not-listed.com"] + "prevent_media_downloads_from": ["not-listed.com"], + "enable_authenticated_media": False, } ) def test_remote_media_normally_unblocked(self) -> None: @@ -132,6 +134,7 @@ class MediaDomainBlockingTests(unittest.HomeserverTestCase): # This proves we haven't broken anything. "prevent_media_downloads_from": ["not-listed.com"], "dynamic_thumbnails": True, + "enable_authenticated_media": False, } ) def test_remote_media_thumbnail_normally_unblocked(self) -> None: diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py
index a96f0e7fca..2a7bee19f9 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py
@@ -42,6 +42,7 @@ from synapse.util.stringutils import parse_and_validate_mxc_uri from tests import unittest from tests.server import FakeTransport from tests.test_utils import SMALL_PNG +from tests.unittest import override_config try: import lxml @@ -877,7 +878,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): data = base64.b64encode(SMALL_PNG) end_content = ( - b"<html><head>" b'<img src="data:image/png;base64,%s" />' b"</head></html>" + b'<html><head><img src="data:image/png;base64,%s" /></head></html>' ) % (data,) channel = self.make_request( @@ -1259,6 +1260,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.assertIsNone(_port) return host, media_id + @override_config({"enable_authenticated_media": False}) def test_storage_providers_exclude_files(self) -> None: """Test that files are not stored in or fetched from storage providers.""" host, media_id = self._download_image() @@ -1301,6 +1303,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): "URL cache file was unexpectedly retrieved from a storage provider", ) + @override_config({"enable_authenticated_media": False}) def test_storage_providers_exclude_thumbnails(self) -> None: """Test that thumbnails are not stored in or fetched from storage providers.""" host, media_id = self._download_image() diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index e166c13bc1..96a4f5598e 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py
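In the well-known change below, MSC3861 delegation no longer reads the account management URL from local config; it is discovered from the issuer's OpenID configuration document, and the fetch is cached. The mapping the test pins down is roughly (a sketch of the relationship, using the same values as the test):

    # Fetched once from https://issuer/.well-known/openid-configuration:
    oidc_metadata = {
        "issuer": "https://issuer",
        "account_management_uri": "https://my-account.issuer",
    }

    # ...and surfaced to clients in /.well-known/matrix/client as:
    msc2965_entry = {
        "org.matrix.msc2965.authentication": {
            "issuer": oidc_metadata["issuer"],
            "account": oidc_metadata["account_management_uri"],
        }
    }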
@@ -17,6 +17,8 @@ # [This file includes modifications made by New Vector Limited] # # +from unittest.mock import AsyncMock + from twisted.web.resource import Resource from synapse.rest.well_known import well_known_resource @@ -35,7 +37,6 @@ class WellKnownTests(unittest.HomeserverTestCase): @unittest.override_config( { "public_baseurl": "https://tesths", - "default_identity_server": "https://testis", } ) def test_client_well_known(self) -> None: @@ -48,7 +49,6 @@ class WellKnownTests(unittest.HomeserverTestCase): channel.json_body, { "m.homeserver": {"base_url": "https://tesths/"}, - "m.identity_server": {"base_url": "https://testis"}, }, ) @@ -67,7 +67,6 @@ class WellKnownTests(unittest.HomeserverTestCase): @unittest.override_config( { "public_baseurl": "https://tesths", - "default_identity_server": "https://testis", "extra_well_known_client_content": {"custom": False}, } ) @@ -81,7 +80,6 @@ class WellKnownTests(unittest.HomeserverTestCase): channel.json_body, { "m.homeserver": {"base_url": "https://tesths/"}, - "m.identity_server": {"base_url": "https://testis"}, "custom": False, }, ) @@ -112,7 +110,6 @@ class WellKnownTests(unittest.HomeserverTestCase): "msc3861": { "enabled": True, "issuer": "https://issuer", - "account_management_url": "https://my-account.issuer", "client_id": "id", "client_auth_method": "client_secret_post", "client_secret": "secret", @@ -122,18 +119,33 @@ class WellKnownTests(unittest.HomeserverTestCase): } ) def test_client_well_known_msc3861_oauth_delegation(self) -> None: - channel = self.make_request( - "GET", "/.well-known/matrix/client", shorthand=False + # Patch the HTTP client to return the issuer metadata + req_mock = AsyncMock( + return_value={ + "issuer": "https://issuer", + "account_management_uri": "https://my-account.issuer", + } ) + self.hs.get_proxied_http_client().get_json = req_mock # type: ignore[method-assign] - self.assertEqual(channel.code, 200) - self.assertEqual( - channel.json_body, - { - "m.homeserver": {"base_url": "https://homeserver/"}, - "org.matrix.msc2965.authentication": { - "issuer": "https://issuer", - "account": "https://my-account.issuer", + for _ in range(2): + channel = self.make_request( + "GET", "/.well-known/matrix/client", shorthand=False + ) + + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body, + { + "m.homeserver": {"base_url": "https://homeserver/"}, + "org.matrix.msc2965.authentication": { + "issuer": "https://issuer", + "account": "https://my-account.issuer", + }, }, - }, + ) + + # It should have been called exactly once, because it gets cached + req_mock.assert_called_once_with( + "https://issuer/.well-known/openid-configuration" ) diff --git a/tests/server.py b/tests/server.py
index 3e377585ce..f01708b77f 100644 --- a/tests/server.py +++ b/tests/server.py
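Part of the server.py change below promotes FakeTransport from ITransport to ITCPTransport, stubbing out the TCP-specific methods. One way to smoke-test such an interface claim (a sketch; transport stands for any constructed FakeTransport instance):

    from twisted.internet.interfaces import ITCPTransport
    from zope.interface.verify import verifyObject

    # Raises a zope.interface verification error if the instance is missing
    # any method or attribute the interface declares.
    verifyObject(ITCPTransport, transport)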
@@ -58,6 +58,7 @@ import twisted from twisted.enterprise import adbapi from twisted.internet import address, tcp, threads, udp from twisted.internet._resolver import SimpleResolverComplexifier +from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import ( @@ -73,6 +74,7 @@ from twisted.internet.interfaces import ( IReactorPluggableNameResolver, IReactorTime, IResolverSimple, + ITCPTransport, ITransport, ) from twisted.internet.protocol import ClientFactory, DatagramProtocol, Factory @@ -223,9 +225,9 @@ class FakeChannel: new_headers.addRawHeader(k, v) headers = new_headers - assert isinstance( - headers, Headers - ), f"headers are of the wrong type: {headers!r}" + assert isinstance(headers, Headers), ( + f"headers are of the wrong type: {headers!r}" + ) self.result["headers"] = headers @@ -341,7 +343,6 @@ class FakeSite: self, resource: IResource, reactor: IReactorTime, - experimental_cors_msc3886: bool = False, ): """ @@ -350,7 +351,6 @@ class FakeSite: """ self._resource = resource self.reactor = reactor - self.experimental_cors_msc3886 = experimental_cors_msc3886 def getResourceFor(self, request: Request) -> IResource: return self._resource @@ -780,7 +780,7 @@ def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: return clock, hs_clock -@implementer(ITransport) +@implementer(ITCPTransport) @attr.s(cmp=False, auto_attribs=True) class FakeTransport: """ @@ -809,12 +809,12 @@ class FakeTransport: will get called back for connectionLost() notifications etc. """ - _peer_address: IAddress = attr.Factory( + _peer_address: Union[IPv4Address, IPv6Address] = attr.Factory( lambda: address.IPv4Address("TCP", "127.0.0.1", 5678) ) """The value to be returned by getPeer""" - _host_address: IAddress = attr.Factory( + _host_address: Union[IPv4Address, IPv6Address] = attr.Factory( lambda: address.IPv4Address("TCP", "127.0.0.1", 1234) ) """The value to be returned by getHost""" @@ -826,10 +826,10 @@ class FakeTransport: producer: Optional[IPushProducer] = None autoflush: bool = True - def getPeer(self) -> IAddress: + def getPeer(self) -> Union[IPv4Address, IPv6Address]: return self._peer_address - def getHost(self) -> IAddress: + def getHost(self) -> Union[IPv4Address, IPv6Address]: return self._host_address def loseConnection(self) -> None: @@ -939,6 +939,51 @@ class FakeTransport: logger.info("FakeTransport: Buffer now empty, completing disconnect") self.disconnected = True + ## ITCPTransport methods. ## + + def loseWriteConnection(self) -> None: + """ + Half-close the write side of a TCP connection. + + If the protocol instance this is attached to provides + IHalfCloseableProtocol, it will get notified when the operation is + done. When closing write connection, as with loseConnection this will + only happen when buffer has emptied and there is no registered + producer. + """ + raise NotImplementedError() + + def getTcpNoDelay(self) -> bool: + """ + Return if C{TCP_NODELAY} is enabled. + """ + return False + + def setTcpNoDelay(self, enabled: bool) -> None: + """ + Enable/disable C{TCP_NODELAY}. + + Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are + sent sooner, possibly at the expense of overall throughput. + """ + # Ignore setting this. + + def getTcpKeepAlive(self) -> bool: + """ + Return if C{SO_KEEPALIVE} is enabled. 
+ """ + return False + + def setTcpKeepAlive(self, enabled: bool) -> None: + """ + Enable/disable C{SO_KEEPALIVE}. + + Enabling C{SO_KEEPALIVE} sends packets periodically when the connection + is otherwise idle, usually once every two hours. They are intended + to allow detection of lost peers in a non-infinite amount of time. + """ + # Ignore setting this. + def connect_client( reactor: ThreadedMemoryReactorClock, client_id: int @@ -1166,6 +1211,12 @@ def setup_test_homeserver( hs.get_auth_handler().validate_hash = validate_hash # type: ignore[assignment] + # We need to replace the media threadpool with the fake test threadpool. + def thread_pool() -> threadpool.ThreadPool: + return reactor.getThreadPool() + + hs.get_media_sender_thread_pool = thread_pool # type: ignore[method-assign] + # Load any configured modules into the homeserver module_api = hs.get_module_api() for module, module_config in hs.config.modules.loaded_modules: diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 0e3e4f7293..997ee7b91b 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -89,7 +89,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): return_value="!something:localhost" ) self._rlsn._store.add_tag_to_room = AsyncMock(return_value=None) # type: ignore[method-assign] - self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) # type: ignore[method-assign] + self._rlsn._store.get_tags_for_room = AsyncMock(return_value={}) @override_config({"hs_disabled": True}) def test_maybe_send_server_notice_disabled_hs(self) -> None: diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py
index fd1f5e7fd5..104d141a72 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py
@@ -20,7 +20,7 @@ # import json from contextlib import contextmanager -from typing import Generator, List, Tuple +from typing import Generator, List, Set, Tuple from unittest import mock from twisted.enterprise.adbapi import ConnectionPool @@ -295,6 +295,53 @@ class EventCacheTestCase(unittest.HomeserverTestCase): self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) +class GetEventsTestCase(unittest.HomeserverTestCase): + """Test `get_events(...)`/`get_events_as_list(...)`""" + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store: EventsWorkerStore = hs.get_datastores().main + + def test_get_lots_of_messages(self) -> None: + """Sanity check that `get_events(...)`/`get_events_as_list(...)` works""" + num_events = 100 + + user_id = self.register_user("user", "pass") + user_tok = self.login(user_id, "pass") + + room_id = self.helper.create_room_as(user_id, tok=user_tok) + + event_ids: Set[str] = set() + for i in range(num_events): + event = self.get_success( + inject_event( + self.hs, + room_id=room_id, + type="m.room.message", + sender=user_id, + content={ + "body": f"foo{i}", + "msgtype": "m.text", + }, + ) + ) + event_ids.add(event.event_id) + + # Sanity check that we actually created the events + self.assertEqual(len(event_ids), num_events) + + # This is the function under test + fetched_event_map = self.get_success(self.store.get_events(event_ids)) + + # Sanity check that we got the events back + self.assertIncludes(fetched_event_map.keys(), event_ids, exact=True) + + class DatabaseOutageTestCase(unittest.HomeserverTestCase): """Test event fetching during a database outage.""" diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 506d981ce6..49dc973a36 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py
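The hunk below adds a degenerate-case check: simple_upsert_many_txn called with key columns only, where a conflicting row must be left untouched. On PostgreSQL the generated statement amounts to roughly the following (illustrative SQL; the exact statement comes from the database pool):

    # With key_names=("id", "username") and no value columns, the upsert is
    # approximately:
    UPSERT_KEYS_ONLY_SQL = """
        INSERT INTO tablename (id, username) VALUES (2, 'user2')
        ON CONFLICT (id, username) DO NOTHING
    """
    # ...so the pre-existing (2, "user2", "bleb") row keeps its value column,
    # which is exactly what the added assertion verifies.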
@@ -112,6 +112,24 @@ class UpdateUpsertManyTests(unittest.HomeserverTestCase): {(1, "user1", "hello"), (2, "user2", "bleb")}, ) + self.get_success( + self.storage.db_pool.runInteraction( + "test", + self.storage.db_pool.simple_upsert_many_txn, + self.table_name, + key_names=key_names, + key_values=[[2, "user2"]], + value_names=[], + value_values=[], + ) + ) + + # Check results are what we expect + self.assertEqual( + set(self._dump_table_to_tuple()), + {(1, "user1", "hello"), (2, "user2", "bleb")}, + ) + def test_simple_update_many(self) -> None: """ simple_update_many performs many updates at once. diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 2859bcf4bd..0e52dd26ce 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py
@@ -24,6 +24,7 @@ from typing import Iterable, Optional, Set from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import AccountDataTypes +from synapse.api.errors import Codes, SynapseError from synapse.server import HomeServer from synapse.util import Clock @@ -93,6 +94,20 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): # Check the removed user. self.assert_ignorers("@another:remote", {self.user}) + def test_ignoring_self_fails(self) -> None: + """Ensure users cannot add themselves to the ignored list.""" + + f = self.get_failure( + self.store.add_account_data_for_user( + self.user, + AccountDataTypes.IGNORED_USER_LIST, + {"ignored_users": {self.user: {}}}, + ), + SynapseError, + ).value + self.assertEqual(f.code, 400) + self.assertEqual(f.errcode, Codes.INVALID_PARAM) + def test_caching(self) -> None: """Ensure that caching works properly between different users.""" # The first user ignores a user. diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 9420d03841..11313fc933 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py
@@ -349,7 +349,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): ) self.mock_txn.execute.assert_called_once_with( - "UPDATE tablename SET colC = ?, colD = ? WHERE" " colA = ? AND colB = ?", + "UPDATE tablename SET colC = ?, colD = ? WHERE colA = ? AND colB = ?", [3, 4, 1, 2], ) diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index ba01b038ab..74edca7523 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py
@@ -211,9 +211,9 @@ class DeviceStoreTestCase(HomeserverTestCase): even if that means leaving an earlier batch one EDU short of the limit. """ - assert self.hs.is_mine_id( - "@user_id:test" - ), "Test not valid: this MXID should be considered local" + assert self.hs.is_mine_id("@user_id:test"), ( + "Test not valid: this MXID should be considered local" + ) self.get_success( self.store.set_e2e_cross_signing_key( diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index 088f0d24f9..0500c68e9d 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py
@@ -114,7 +114,7 @@ def get_all_topologically_sorted_orders( # This is implemented by Kahn's algorithm, and forking execution each time # we have a choice over which node to consider next. - degree_map = {node: 0 for node in nodes} + degree_map = dict.fromkeys(nodes, 0) reverse_graph: Dict[T, Set[T]] = {} for node, edges in graph.items(): diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 233066bf82..b095090535 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py
@@ -101,14 +101,6 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) self.assertEqual(2, len(http_actions)) - # Fetch unread actions for email pushers. - email_actions = self.get_success( - self.store.get_unread_push_actions_for_user_in_range_for_email( - user_id, 0, 1000, 20 - ) - ) - self.assertEqual(2, len(email_actions)) - # Send a receipt, which should clear the first action. self.get_success( self.store.insert_receipt( @@ -126,12 +118,6 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) ) self.assertEqual(1, len(http_actions)) - email_actions = self.get_success( - self.store.get_unread_push_actions_for_user_in_range_for_email( - user_id, 0, 1000, 20 - ) - ) - self.assertEqual(1, len(email_actions)) # Send a thread receipt to clear the thread action. self.get_success( @@ -150,12 +136,6 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) ) self.assertEqual([], http_actions) - email_actions = self.get_success( - self.store.get_unread_push_actions_for_user_in_range_for_email( - user_id, 0, 1000, 20 - ) - ) - self.assertEqual([], email_actions) def test_count_aggregation(self) -> None: # Create a user to receive notifications and send receipts. diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index 0a7c4c9421..cb3d8e19bc 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py
@@ -19,6 +19,7 @@ # # +import logging from typing import List, Optional from twisted.test.proto_helpers import MemoryReactor @@ -35,6 +36,8 @@ from synapse.util import Clock from tests.unittest import HomeserverTestCase +logger = logging.getLogger(__name__) + class ExtremPruneTestCase(HomeserverTestCase): servlets = [ diff --git a/tests/storage/test_events_bg_updates.py b/tests/storage/test_events_bg_updates.py new file mode 100644
index 0000000000..ecdf413e3b --- /dev/null +++ b/tests/storage/test_events_bg_updates.py
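The new file below tests fixup_max_depth_cap_bg_update, a background update that walks rooms and clamps topological_ordering values that historically overflowed past MAX_DEPTH. Reduced to its essence, the per-event fix is a clamp (an illustrative reduction; the real update does this in bulk SQL, and only for sufficiently new room versions):

    from synapse.api.constants import MAX_DEPTH

    def capped_topological_ordering(depth: int) -> int:
        # Orderings beyond MAX_DEPTH collapse onto MAX_DEPTH; smaller values
        # are left untouched, matching the assertions in the tests below.
        return min(depth, MAX_DEPTH)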
@@ -0,0 +1,157 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# <https://www.gnu.org/licenses/agpl-3.0.html>. +# +# + +from typing import Dict + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.constants import MAX_DEPTH +from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.server import HomeServer +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class TestFixupMaxDepthCapBgUpdate(HomeserverTestCase): + """Test the background update that caps topological_ordering at MAX_DEPTH.""" + + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + self.store = self.hs.get_datastores().main + self.db_pool = self.store.db_pool + + self.room_id = "!testroom:example.com" + + # Reinsert the background update as it was already run at the start of + # the test. + self.get_success( + self.db_pool.simple_insert( + table="background_updates", + values={ + "update_name": "fixup_max_depth_cap", + "progress_json": "{}", + }, + ) + ) + + def create_room(self, room_version: RoomVersion) -> Dict[str, int]: + """Create a room with a known room version and insert events. + + Returns a map from each inserted event ID to its depth, + some of which exceed MAX_DEPTH. + """ + + # Create a room with a specific room version + self.get_success( + self.db_pool.simple_insert( + table="rooms", + values={ + "room_id": self.room_id, + "room_version": room_version.identifier, + }, + ) + ) + + # Insert events with some depths exceeding MAX_DEPTH + event_id_to_depth: Dict[str, int] = {} + for depth in range(MAX_DEPTH - 5, MAX_DEPTH + 5): + event_id = f"$event{depth}:example.com" + event_id_to_depth[event_id] = depth + + self.get_success( + self.db_pool.simple_insert( + table="events", + values={ + "event_id": event_id, + "room_id": self.room_id, + "topological_ordering": depth, + "depth": depth, + "type": "m.test", + "sender": "@user:test", + "processed": True, + "outlier": False, + }, + ) + ) + + return event_id_to_depth + + def test_fixup_max_depth_cap_bg_update(self) -> None: + """Test that the background update correctly caps topological_ordering + at MAX_DEPTH.""" + + event_id_to_depth = self.create_room(RoomVersions.V6) + + # Run the background update + progress = {"room_id": ""} + batch_size = 10 + num_rooms = self.get_success( + self.store.fixup_max_depth_cap_bg_update(progress, batch_size) + ) + + # Verify the number of rooms processed + self.assertEqual(num_rooms, 1) + + # Verify that the topological_ordering of events has been capped at + # MAX_DEPTH + rows = self.get_success( + self.db_pool.simple_select_list( + table="events", + keyvalues={"room_id": self.room_id}, + retcols=["event_id", "topological_ordering"], + ) + ) + + for event_id, topological_ordering in rows: + if event_id_to_depth[event_id] >= MAX_DEPTH: + # Events with a depth greater than or equal to MAX_DEPTH should + # be capped at MAX_DEPTH. + self.assertEqual(topological_ordering, MAX_DEPTH) + else: + # Events with a depth less than MAX_DEPTH should remain + # unchanged.
+ self.assertEqual(topological_ordering, event_id_to_depth[event_id]) + + def test_fixup_max_depth_cap_bg_update_old_room_version(self) -> None: + """Test that the background update does not cap topological_ordering for + rooms with old room versions.""" + + event_id_to_depth = self.create_room(RoomVersions.V5) + + # Run the background update + progress = {"room_id": ""} + batch_size = 10 + num_rooms = self.get_success( + self.store.fixup_max_depth_cap_bg_update(progress, batch_size) + ) + + # Verify the number of rooms processed + self.assertEqual(num_rooms, 0) + + # Verify that the topological_ordering of events has not been capped + # at MAX_DEPTH + rows = self.get_success( + self.db_pool.simple_select_list( + table="events", + keyvalues={"room_id": self.room_id}, + retcols=["event_id", "topological_ordering"], + ) + ) + + # Assert that the topological_ordering of events has not been changed + # from their depth. + self.assertDictEqual(event_id_to_depth, dict(rows)) diff --git a/tests/storage/test_invite_rule.py b/tests/storage/test_invite_rule.py new file mode 100644
index 0000000000..38c97ecaa3 --- /dev/null +++ b/tests/storage/test_invite_rule.py
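Taken together, the tests in the new file below pin down an evaluation order for invite rules: user-level entries are consulted before server-level ones, allow beats ignore and ignore beats block within each level, and the overall default is allow. A distilled sketch of that order (illustrative only; the real config also glob-matches server names and discards invalid entries):

    from typing import Collection

    def invite_rule_sketch(
        inviter: str,
        allowed_users: Collection[str],
        ignored_users: Collection[str],
        blocked_users: Collection[str],
        allowed_servers: Collection[str],
        ignored_servers: Collection[str],
        blocked_servers: Collection[str],
    ) -> str:
        server = inviter.split(":", 1)[1]
        # User-level rules take precedence over server-level ones.
        for entries, verdict in (
            (allowed_users, "allow"),
            (ignored_users, "ignore"),
            (blocked_users, "block"),
        ):
            if inviter in entries:
                return verdict
        for entries, verdict in (
            (allowed_servers, "allow"),
            (ignored_servers, "ignore"),
            (blocked_servers, "block"),
        ):
            if server in entries:  # the real implementation globs here
                return verdict
        return "allow"  # permit by default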
@@ -0,0 +1,167 @@ +from synapse.storage.invite_rule import InviteRule, InviteRulesConfig +from synapse.types import UserID + +from tests import unittest + +regular_user = UserID.from_string("@test:example.org") +allowed_user = UserID.from_string("@allowed:allow.example.org") +blocked_user = UserID.from_string("@blocked:block.example.org") +ignored_user = UserID.from_string("@ignored:ignore.example.org") + + +class InviteFilterTestCase(unittest.TestCase): + def test_empty(self) -> None: + """Permit by default""" + config = InviteRulesConfig(None) + self.assertEqual( + config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW + ) + + def test_ignore_invalid(self) -> None: + """Invalid strings are ignored""" + config = InviteRulesConfig({"blocked_users": ["not a user"]}) + self.assertEqual( + config.get_invite_rule(blocked_user.to_string()), InviteRule.ALLOW + ) + + def test_user_blocked(self) -> None: + """Permit all, except explicitly blocked users""" + config = InviteRulesConfig({"blocked_users": [blocked_user.to_string()]}) + self.assertEqual( + config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK + ) + self.assertEqual( + config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW + ) + + def test_user_ignored(self) -> None: + """Permit all, except explicitly ignored users""" + config = InviteRulesConfig({"ignored_users": [ignored_user.to_string()]}) + self.assertEqual( + config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE + ) + self.assertEqual( + config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW + ) + + def test_user_precedence(self) -> None: + """Allowed wins over ignored, ignored wins over blocked, and blocked wins over the default.""" + config = InviteRulesConfig( + { + "allowed_users": [allowed_user.to_string()], + "ignored_users": [allowed_user.to_string(), ignored_user.to_string()], + "blocked_users": [ + allowed_user.to_string(), + ignored_user.to_string(), + blocked_user.to_string(), + ], + } + ) + self.assertEqual( + config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW + ) + self.assertEqual( + config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE + ) + self.assertEqual( + config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK + ) + + def test_server_blocked(self) -> None: + """Block all users on the server except those allowed.""" + user_on_same_server = UserID("blocked", allowed_user.domain) + config = InviteRulesConfig( + { + "allowed_users": [allowed_user.to_string()], + "blocked_servers": [allowed_user.domain], + } + ) + self.assertEqual( + config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW + ) + self.assertEqual( + config.get_invite_rule(user_on_same_server.to_string()), InviteRule.BLOCK + ) + + def test_server_ignored(self) -> None: + """Ignore all users on the server except those allowed.""" + user_on_same_server = UserID("ignored", allowed_user.domain) + config = InviteRulesConfig( + { + "allowed_users": [allowed_user.to_string()], + "ignored_servers": [allowed_user.domain], + } + ) + self.assertEqual( + config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW + ) + self.assertEqual( + config.get_invite_rule(user_on_same_server.to_string()), InviteRule.IGNORE + ) + + def test_server_allow(self) -> None: + """Allow all from a server except explicitly blocked or ignored users.""" + blocked_user_on_same_server = UserID("blocked", allowed_user.domain) + ignored_user_on_same_server = UserID("ignored", allowed_user.domain) + allowed_user_on_same_server = 
UserID("another", allowed_user.domain) + config = InviteRulesConfig( + { + "ignored_users": [ignored_user_on_same_server.to_string()], + "blocked_users": [blocked_user_on_same_server.to_string()], + "allowed_servers": [allowed_user.to_string()], + } + ) + self.assertEqual( + config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW + ) + self.assertEqual( + config.get_invite_rule(allowed_user_on_same_server.to_string()), + InviteRule.ALLOW, + ) + self.assertEqual( + config.get_invite_rule(blocked_user_on_same_server.to_string()), + InviteRule.BLOCK, + ) + self.assertEqual( + config.get_invite_rule(ignored_user_on_same_server.to_string()), + InviteRule.IGNORE, + ) + + def test_server_precedence(self) -> None: + """Always take allowed over ignored, ignored over blocked, and then block.""" + config = InviteRulesConfig( + { + "allowed_servers": [allowed_user.domain], + "ignored_servers": [allowed_user.domain, ignored_user.domain], + "blocked_servers": [ + allowed_user.domain, + ignored_user.domain, + blocked_user.domain, + ], + } + ) + self.assertEqual( + config.get_invite_rule(allowed_user.to_string()), InviteRule.ALLOW + ) + self.assertEqual( + config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE + ) + self.assertEqual( + config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK + ) + + def test_server_glob(self) -> None: + """Test that glob patterns match""" + config = InviteRulesConfig({"blocked_servers": ["*.example.org"]}) + self.assertEqual( + config.get_invite_rule(allowed_user.to_string()), InviteRule.BLOCK + ) + self.assertEqual( + config.get_invite_rule(ignored_user.to_string()), InviteRule.BLOCK + ) + self.assertEqual( + config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK + ) + self.assertEqual( + config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW + ) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 15ae582051..c453c8b642 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py
@@ -32,13 +32,6 @@ from tests.unittest import default_config, override_config FORTY_DAYS = 40 * 24 * 60 * 60 -def gen_3pids(count: int) -> List[Dict[str, Any]]: - """Generate `count` threepids as a list.""" - return [ - {"medium": "email", "address": "user%i@matrix.org" % i} for i in range(count) - ] - - class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): def default_config(self) -> Dict[str, Any]: config = default_config("test") @@ -57,87 +50,6 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): # Advance the clock a bit self.reactor.advance(FORTY_DAYS) - @override_config({"max_mau_value": 3, "mau_limit_reserved_threepids": gen_3pids(3)}) - def test_initialise_reserved_users(self) -> None: - threepids = self.hs.config.server.mau_limits_reserved_threepids - - # register three users, of which two have reserved 3pids, and a third - # which is a support user. - user1 = "@user1:server" - user1_email = threepids[0]["address"] - user2 = "@user2:server" - user2_email = threepids[1]["address"] - user3 = "@user3:server" - - self.get_success(self.store.register_user(user_id=user1)) - self.get_success(self.store.register_user(user_id=user2)) - self.get_success( - self.store.register_user(user_id=user3, user_type=UserTypes.SUPPORT) - ) - - now = int(self.hs.get_clock().time_msec()) - self.get_success( - self.store.user_add_threepid(user1, "email", user1_email, now, now) - ) - self.get_success( - self.store.user_add_threepid(user2, "email", user2_email, now, now) - ) - - # XXX why are we doing this here? this function is only run at startup - # so it is odd to re-run it here. - self.get_success( - self.store.db_pool.runInteraction( - "initialise", self.store._initialise_reserved_users, threepids - ) - ) - - # the number of users we expect will be counted against the mau limit - # -1 because user3 is a support user and does not count - user_num = len(threepids) - 1 - - # Check the number of active users. Ensure user3 (support user) is not counted - active_count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(active_count, user_num) - - # Test each of the registered users is marked as active - timestamp = self.get_success(self.store.user_last_seen_monthly_active(user1)) - # Mypy notes that one shouldn't compare Optional[int] to 0 with assertGreater. - # Check that timestamp really is an int. - assert timestamp is not None - self.assertGreater(timestamp, 0) - timestamp = self.get_success(self.store.user_last_seen_monthly_active(user2)) - assert timestamp is not None - self.assertGreater(timestamp, 0) - - # Test that users with reserved 3pids are not removed from the MAU table - # XXX some of this is redundant. poking things into the config shouldn't - # work, and in any case it's not obvious what we expect to happen when - # we advance the reactor. 
- self.hs.config.server.max_mau_value = 0 - self.reactor.advance(FORTY_DAYS) - self.hs.config.server.max_mau_value = 5 - - self.get_success(self.store.reap_monthly_active_users()) - - active_count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(active_count, user_num) - - # Add some more users and check they are counted as active - ru_count = 2 - - self.get_success(self.store.upsert_monthly_active_user("@ru1:server")) - self.get_success(self.store.upsert_monthly_active_user("@ru2:server")) - - active_count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(active_count, user_num + ru_count) - - # now run the reaper and check that the number of active users is reduced - # to max_mau_value - self.get_success(self.store.reap_monthly_active_users()) - - active_count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(active_count, 3) - def test_can_insert_and_count_mau(self) -> None: count = self.get_success(self.store.get_monthly_active_count()) self.assertEqual(count, 0) @@ -206,49 +118,6 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): count = self.get_success(self.store.get_monthly_active_count()) self.assertEqual(count, 0) - # Note that below says mau_limit (no s), this is the name of the config - # value, although it gets stored on the config object as mau_limits. - @override_config({"max_mau_value": 5, "mau_limit_reserved_threepids": gen_3pids(5)}) - def test_reap_monthly_active_users_reserved_users(self) -> None: - """Tests that reaping correctly handles reaping where reserved users are - present""" - threepids = self.hs.config.server.mau_limits_reserved_threepids - initial_users = len(threepids) - reserved_user_number = initial_users - 1 - for i in range(initial_users): - user = "@user%d:server" % i - email = "user%d@matrix.org" % i - - self.get_success(self.store.upsert_monthly_active_user(user)) - - # Need to ensure that the most recent entries in the - # monthly_active_users table are reserved - now = int(self.hs.get_clock().time_msec()) - if i != 0: - self.get_success( - self.store.register_user(user_id=user, password_hash=None) - ) - self.get_success( - self.store.user_add_threepid(user, "email", email, now, now) - ) - - self.get_success( - self.store.db_pool.runInteraction( - "initialise", self.store._initialise_reserved_users, threepids - ) - ) - - count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(count, initial_users) - - users = self.get_success(self.store.get_registered_reserved_users()) - self.assertEqual(len(users), reserved_user_number) - - self.get_success(self.store.reap_monthly_active_users()) - - count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(count, self.hs.config.server.max_mau_value) - def test_populate_monthly_users_is_guest(self) -> None: # Test that guest users are not added to mau list user_id = "@user_id:host" @@ -289,46 +158,6 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.upsert_monthly_active_user.assert_not_called() - def test_get_reserved_real_user_account(self) -> None: - # Test no reserved users, or reserved threepids - users = self.get_success(self.store.get_registered_reserved_users()) - self.assertEqual(len(users), 0) - - # Test reserved users but no registered users - user1 = "@user1:example.com" - user2 = "@user2:example.com" - - user1_email = "user1@example.com" - user2_email = "user2@example.com" - threepids = [ - {"medium": "email", "address": 
user1_email}, - {"medium": "email", "address": user2_email}, - ] - - self.hs.config.server.mau_limits_reserved_threepids = threepids - d = self.store.db_pool.runInteraction( - "initialise", self.store._initialise_reserved_users, threepids - ) - self.get_success(d) - - users = self.get_success(self.store.get_registered_reserved_users()) - self.assertEqual(len(users), 0) - - # Test reserved registered users - self.get_success(self.store.register_user(user_id=user1, password_hash=None)) - self.get_success(self.store.register_user(user_id=user2, password_hash=None)) - - now = int(self.hs.get_clock().time_msec()) - self.get_success( - self.store.user_add_threepid(user1, "email", user1_email, now, now) - ) - self.get_success( - self.store.user_add_threepid(user2, "email", user2_email, now, now) - ) - - users = self.get_success(self.store.get_registered_reserved_users()) - self.assertEqual(len(users), len(threepids)) - def test_support_user_not_add_to_mau_limits(self) -> None: support_user_id = "@support:test" diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index 080d5640a5..0aa14fd1f4 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py
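Background for the new purge tests below: most state groups are stored as a delta against a prev_group, so reading a group's full state walks the chain of predecessors. That is also why purging is delicate: deleting a group in the middle of a chain forces its descendants to be stored in full ("de-deltaed") unless unreferenced groups are removed outright. A simplified model of the resolution (a sketch; the real store batches this in SQL):

    from typing import Dict, Optional, Tuple

    StateKey = Tuple[str, str]  # (event type, state_key)

    def resolve_state(
        group: int,
        prev_group: Dict[int, Optional[int]],
        deltas: Dict[int, Dict[StateKey, str]],
    ) -> Dict[StateKey, str]:
        # Walk back to the root of the delta chain...
        chain = []
        current: Optional[int] = group
        while current is not None:
            chain.append(current)
            current = prev_group.get(current)
        # ...then apply each group's delta, oldest first.
        state: Dict[StateKey, str] = {}
        for g in reversed(chain):
            state.update(deltas.get(g, {}))
        return state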
@@ -23,6 +23,8 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import NotFoundError, SynapseError from synapse.rest.client import room from synapse.server import HomeServer +from synapse.types.state import StateFilter +from synapse.types.storage import _BackgroundUpdates from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -40,6 +42,8 @@ class PurgeTests(HomeserverTestCase): self.room_id = self.helper.create_room_as(self.user_id) self.store = hs.get_datastores().main + self.state_store = hs.get_datastores().state + self.state_deletion_store = hs.get_datastores().state_deletion self._storage_controllers = self.hs.get_storage_controllers() def test_purge_history(self) -> None: @@ -128,3 +132,328 @@ class PurgeTests(HomeserverTestCase): self.store._invalidate_local_get_event_cache(create_event.event_id) self.get_failure(self.store.get_event(create_event.event_id), NotFoundError) self.get_failure(self.store.get_event(first["event_id"]), NotFoundError) + + def test_purge_history_deletes_state_groups(self) -> None: + """Test that unreferenced state groups get cleaned up after purge""" + + # Send four state changes to the room. + first = self.helper.send_state( + self.room_id, event_type="m.foo", body={"test": 1} + ) + second = self.helper.send_state( + self.room_id, event_type="m.foo", body={"test": 2} + ) + third = self.helper.send_state( + self.room_id, event_type="m.foo", body={"test": 3} + ) + last = self.helper.send_state( + self.room_id, event_type="m.foo", body={"test": 4} + ) + + # Get references to the state groups + event_to_groups = self.get_success( + self.store._get_state_group_for_events( + [ + first["event_id"], + second["event_id"], + third["event_id"], + last["event_id"], + ] + ) + ) + + # Get the topological token + token = self.get_success( + self.store.get_topological_token_for_event(last["event_id"]) + ) + token_str = self.get_success(token.to_string(self.hs.get_datastores().main)) + + # Purge everything before this topological token + self.get_success( + self._storage_controllers.purge_events.purge_history( + self.room_id, token_str, True + ) + ) + + # Advance so that the background job to delete the state groups runs + self.reactor.advance( + 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000 + ) + + # We expect that all the state groups associated with the events above, + # except the last one, now return no state. + state_groups = self.get_success( + self.state_store._get_state_groups_from_groups( + list(event_to_groups.values()), StateFilter.all() + ) + ) + first_state = state_groups[event_to_groups[first["event_id"]]] + second_state = state_groups[event_to_groups[second["event_id"]]] + third_state = state_groups[event_to_groups[third["event_id"]]] + last_state = state_groups[event_to_groups[last["event_id"]]] + + self.assertEqual(first_state, {}) + self.assertEqual(second_state, {}) + self.assertEqual(third_state, {}) + self.assertNotEqual(last_state, {}) + + def test_purge_unreferenced_state_group(self) -> None: + """Test that purging a room also gets rid of unreferenced state groups + it encounters during the purge. + + This is important, as otherwise these unreferenced state groups get + "de-deltaed" during the purge process, consuming lots of disk space. 
+ """ + + self.helper.send(self.room_id, body="test1") + state1 = self.helper.send_state( + self.room_id, "org.matrix.test", body={"number": 2} + ) + state2 = self.helper.send_state( + self.room_id, "org.matrix.test", body={"number": 3} + ) + self.helper.send(self.room_id, body="test4") + last = self.helper.send(self.room_id, body="test5") + + # Create an unreferenced state group that has a prev group of one of the + # to-be-purged events. + prev_group = self.get_success( + self.store._get_state_group_for_event(state1["event_id"]) + ) + unreferenced_state_group = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=prev_group, + delta_ids={("org.matrix.test", ""): state2["event_id"]}, + current_state_ids=None, + ) + ) + + # Get the topological token + token = self.get_success( + self.store.get_topological_token_for_event(last["event_id"]) + ) + token_str = self.get_success(token.to_string(self.hs.get_datastores().main)) + + # Purge everything before this topological token + self.get_success( + self._storage_controllers.purge_events.purge_history( + self.room_id, token_str, True + ) + ) + + # Advance so that the background jobs to delete the state groups runs + self.reactor.advance( + 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000 + ) + + # We expect that the unreferenced state group has been deleted from all tables. + row = self.get_success( + self.state_store.db_pool.simple_select_one_onecol( + table="state_groups", + keyvalues={"id": unreferenced_state_group}, + retcol="id", + allow_none=True, + desc="test_purge_unreferenced_state_group", + ) + ) + self.assertIsNone(row) + + row = self.get_success( + self.state_store.db_pool.simple_select_one_onecol( + table="state_groups_state", + keyvalues={"state_group": unreferenced_state_group}, + retcol="state_group", + allow_none=True, + desc="test_purge_unreferenced_state_group", + ) + ) + self.assertIsNone(row) + + row = self.get_success( + self.state_store.db_pool.simple_select_one_onecol( + table="state_group_edges", + keyvalues={"state_group": unreferenced_state_group}, + retcol="state_group", + allow_none=True, + desc="test_purge_unreferenced_state_group", + ) + ) + self.assertIsNone(row) + + row = self.get_success( + self.state_store.db_pool.simple_select_one_onecol( + table="state_groups_pending_deletion", + keyvalues={"state_group": unreferenced_state_group}, + retcol="state_group", + allow_none=True, + desc="test_purge_unreferenced_state_group", + ) + ) + self.assertIsNone(row) + + # We expect there to now only be one state group for the room, which is + # the state group of the last event (as the only outlier). + state_groups = self.get_success( + self.state_store.db_pool.simple_select_onecol( + table="state_groups", + keyvalues={"room_id": self.room_id}, + retcol="id", + desc="test_purge_unreferenced_state_group", + ) + ) + self.assertEqual(len(state_groups), 1) + + def test_clear_unreferenced_state_groups(self) -> None: + """Test that any unreferenced state groups are automatically cleaned up.""" + + self.helper.send(self.room_id, body="test1") + state1 = self.helper.send_state( + self.room_id, "org.matrix.test", body={"number": 2} + ) + # Create enough state events to require multiple batches of + # mark_unreferenced_state_groups_for_deletion_bg_update to be run. 
+ for i in range(200): + self.helper.send_state(self.room_id, "org.matrix.test", body={"number": i}) + self.helper.send(self.room_id, body="test4") + last = self.helper.send(self.room_id, body="test5") + + # Create an unreferenced state group that has no prev group. + unreferenced_free_state_group = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=None, + delta_ids={("org.matrix.test", ""): state1["event_id"]}, + current_state_ids={("org.matrix.test", ""): ""}, + ) + ) + + # Create some unreferenced state groups that have a prev group of one of the + # existing state groups. + prev_group = self.get_success( + self.store._get_state_group_for_event(state1["event_id"]) + ) + unreferenced_end_state_group = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=prev_group, + delta_ids={("org.matrix.test", ""): state1["event_id"]}, + current_state_ids=None, + ) + ) + another_unreferenced_end_state_group = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=unreferenced_end_state_group, + delta_ids={("org.matrix.test", ""): state1["event_id"]}, + current_state_ids=None, + ) + ) + + # Add some other unreferenced state groups which lead to a referenced state + # group. + # These state groups should not get deleted. + chain_state_group = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=None, + delta_ids={("org.matrix.test", ""): ""}, + current_state_ids={("org.matrix.test", ""): ""}, + ) + ) + chain_state_group_2 = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=chain_state_group, + delta_ids={("org.matrix.test", ""): ""}, + current_state_ids=None, + ) + ) + referenced_chain_state_group = self.get_success( + self.state_store.store_state_group( + event_id=last["event_id"], + room_id=self.room_id, + prev_group=chain_state_group_2, + delta_ids={("org.matrix.test", ""): ""}, + current_state_ids=None, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "event_to_state_groups", + { + "event_id": "$new_event", + "state_group": referenced_chain_state_group, + }, + ) + ) + + # Insert and run the background update. + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.MARK_UNREFERENCED_STATE_GROUPS_FOR_DELETION_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Advance so that the background job to delete the state groups runs + self.reactor.advance( + 1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000 + ) + + # We expect that the unreferenced free state group has been deleted. + row = self.get_success( + self.state_store.db_pool.simple_select_one_onecol( + table="state_groups", + keyvalues={"id": unreferenced_free_state_group}, + retcol="id", + allow_none=True, + desc="test_purge_unreferenced_state_group", + ) + ) + self.assertIsNone(row) + + # We expect that both unreferenced end state groups have been deleted. 
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups",
+ keyvalues={"id": unreferenced_end_state_group},
+ retcol="id",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+ row = self.get_success(
+ self.state_store.db_pool.simple_select_one_onecol(
+ table="state_groups",
+ keyvalues={"id": another_unreferenced_end_state_group},
+ retcol="id",
+ allow_none=True,
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertIsNone(row)
+
+ # We expect only the still-referenced state groups for the room to remain;
+ # the unreferenced ones created above have all been cleaned up.
+ state_groups = self.get_success(
+ self.state_store.db_pool.simple_select_onecol(
+ table="state_groups",
+ keyvalues={"room_id": self.room_id},
+ retcol="id",
+ desc="test_purge_unreferenced_state_group",
+ )
+ )
+ self.assertEqual(len(state_groups), 210)
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index 14e3871dc1..ebad759fd1 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py
@@ -21,7 +21,6 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import UserTypes -from synapse.api.errors import ThreepidValidationError from synapse.server import HomeServer from synapse.types import JsonDict, UserID, UserInfo from synapse.util import Clock @@ -145,39 +144,6 @@ class RegistrationStoreTestCase(HomeserverTestCase): res = self.get_success(self.store.is_support_user(SUPPORT_USER)) self.assertTrue(res) - def test_3pid_inhibit_invalid_validation_session_error(self) -> None: - """Tests that enabling the configuration option to inhibit 3PID errors on - /requestToken also inhibits validation errors caused by an unknown session ID. - """ - - # Check that, with the config setting set to false (the default value), a - # validation error is caused by the unknown session ID. - e = self.get_failure( - self.store.validate_threepid_session( - "fake_sid", - "fake_client_secret", - "fake_token", - 0, - ), - ThreepidValidationError, - ) - self.assertEqual(e.value.msg, "Unknown session_id", e) - - # Set the config setting to true. - self.store._ignore_unknown_session_error = True - - # Check that now the validation error is caused by the token not matching. - e = self.get_failure( - self.store.validate_threepid_session( - "fake_sid", - "fake_client_secret", - "fake_token", - 0, - ), - ThreepidValidationError, - ) - self.assertEqual(e.value.msg, "Validation token not found or has expired", e) - class ApprovalRequiredRegistrationTestCase(HomeserverTestCase): def default_config(self) -> JsonDict: diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index 418b556108..330fea0e62 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py
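The rewritten test in the hunk below hinges on one rule of the membership store: a room only counts as "locally forgotten" while every local membership row for it is flagged as forgotten, and any newer membership event (such as a re-join) clears the flag. A minimal, self-contained sketch of that rule, as a toy model with invented names rather than Synapse's actual storage code:

from typing import Dict, Tuple

# (user_id, room_id) -> forgotten flag; a stand-in for the real membership table.
_forgotten: Dict[Tuple[str, str], bool] = {}

def record_membership(user_id: str, room_id: str) -> None:
    # Any new membership event (join, leave, ...) resets the forgotten flag.
    _forgotten[(user_id, room_id)] = False

def forget(user_id: str, room_id: str) -> None:
    _forgotten[(user_id, room_id)] = True

def is_locally_forgotten_room(room_id: str) -> bool:
    flags = [f for (_u, r), f in _forgotten.items() if r == room_id]
    return bool(flags) and all(flags)

record_membership("@user1:test", "!foo:other")  # join
assert not is_locally_forgotten_room("!foo:other")
record_membership("@user1:test", "!foo:other")  # leave
forget("@user1:test", "!foo:other")
assert is_locally_forgotten_room("!foo:other")
record_membership("@user1:test", "!foo:other")  # re-join clears the flag
assert not is_locally_forgotten_room("!foo:other")

The test below exercises exactly this last transition, but with a remote room, since a purely local room can't be re-joined once everyone has left.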
@@ -24,7 +24,7 @@ from typing import List, Optional, Tuple, cast
from twisted.test.proto_helpers import MemoryReactor
-from synapse.api.constants import EventTypes, JoinRules, Membership
+from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
from synapse.rest import admin
from synapse.rest.admin import register_servlets_for_client_rest_resource
@@ -38,6 +38,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import TestHomeServer
from tests.test_utils import event_injection
+from tests.test_utils.event_injection import create_event
from tests.unittest import skip_unless
logger = logging.getLogger(__name__)
@@ -54,6 +55,10 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
# We can't test the RoomMemberStore on its own without the other event
# storage logic
self.store = hs.get_datastores().main
+ self.state_handler = self.hs.get_state_handler()
+ persistence = self.hs.get_storage_controllers().persistence
+ assert persistence is not None
+ self.persistence = persistence
self.u_alice = self.register_user("alice", "pass")
self.t_alice = self.login("alice", "pass")
@@ -220,31 +225,166 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase):
)
def test_join_locally_forgotten_room(self) -> None:
- """Tests if a user joins a forgotten room the room is not forgotten anymore."""
- self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice)
- self.assertFalse(
- self.get_success(self.store.is_locally_forgotten_room(self.room))
+ """
+ Tests that if a user joins a forgotten room, the room is no longer forgotten.
+
+ Since a room can't be re-joined once everyone has left, this can only happen
+ with a room that has remote users in it.
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote room
+ creator = "@user:other"
+ room_id = "!foo:other"
+ room_version = RoomVersions.V10
+ shared_kwargs = {
+ "room_id": room_id,
+ "room_version": room_version.identifier,
+ }
+
+ create_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[],
+ type=EventTypes.Create,
+ state_key="",
+ content={
+ # The `ROOM_CREATOR` field could be removed if we used a room
+ # version > 10 (in favor of relying on `sender`)
+ EventContentFields.ROOM_CREATOR: creator,
+ EventContentFields.ROOM_VERSION: room_version.identifier,
+ },
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ creator_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[create_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=creator,
+ content={"membership": Membership.JOIN},
+ sender=creator,
+ **shared_kwargs,
+ )
)
- # after leaving and forget the room, it is forgotten
- self.get_success(
- event_injection.inject_member_event(
- self.hs, self.room, self.u_alice, "leave"
+ remote_events_and_contexts = [
+ create_tuple,
+ creator_tuple,
+ ]
+
+ # Ensure the local HS knows the room version
+ self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+ # Persist these events as backfilled events.
+ for event, context in remote_events_and_contexts:
+ self.get_success(
+ self.persistence.persist_event(event, context, backfilled=True)
+ )
+
+ # Now we join the local user to the room. We want to make this feel as close to
+ # the real `process_remote_join()` as possible but we'd like to avoid some of
+ # the auth checks that would be done in the real code.
+ #
+ # FIXME: The test was originally written using this less-real
+ # `persist_event(...)` shortcut but it would be nice to use the real remote join
+ # process in a `FederatingHomeserverTestCase`.
+ flawed_join_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[creator_tuple[0].event_id],
+ # This doesn't work correctly to create an `EventContext` that includes
+ # both of these state events. I assume it's because we're working on our
+ # local homeserver which has the remote state set as `outlier`. We have
+ # to create our own EventContext below to get this right.
+ auth_event_ids=[create_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.JOIN},
+ sender=user1_id,
+ **shared_kwargs,
)
)
- self.get_success(self.store.forget(self.u_alice, self.room))
- self.assertTrue(
- self.get_success(self.store.is_locally_forgotten_room(self.room))
+ # We have to create our own context to get the state set correctly. If we use
+ # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+ # table will only have the join event in it which should never happen in our
+ # real server.
+ join_event = flawed_join_tuple[0]
+ join_context = self.get_success(
+ self.state_handler.compute_event_context(
+ join_event,
+ state_ids_before_event={
+ (e.type, e.state_key): e.event_id for e in [create_tuple[0]]
+ },
+ partial_state=False,
+ )
)
+ self.get_success(self.persistence.persist_event(join_event, join_context))
- # after rejoin the room is not forgotten anymore
- self.get_success(
- event_injection.inject_member_event(
- self.hs, self.room, self.u_alice, "join"
+ # The room shouldn't be forgotten because the local user just joined
+ self.assertFalse(
+ self.get_success(self.store.is_locally_forgotten_room(room_id))
+ )
+
+ # After all of the local users (there is only user1) leave and forget the
+ # room, it is forgotten
+ user1_leave_response = self.helper.leave(room_id, user1_id, tok=user1_tok)
+ user1_leave_event = self.get_success(
+ self.store.get_event(user1_leave_response["event_id"])
+ )
+ self.get_success(self.store.forget(user1_id, room_id))
+ self.assertTrue(self.get_success(self.store.is_locally_forgotten_room(room_id)))
+
+ # Join the local user to the room (again). We want to make this feel as close to
+ # the real `process_remote_join()` as possible but we'd like to avoid some of
+ # the auth checks that would be done in the real code.
+ #
+ # FIXME: The test was originally written using this less-real
+ # `event_injection.inject_member_event(...)` shortcut but it would be nice to
+ # use the real remote join process in a `FederatingHomeserverTestCase`.
+ flawed_join_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[user1_leave_response["event_id"]],
+ # This doesn't work correctly to create an `EventContext` that includes
+ # both of these state events. I assume it's because we're working on our
+ # local homeserver which has the remote state set as `outlier`. We have
+ # to create our own EventContext below to get this right.
+ auth_event_ids=[
+ create_tuple[0].event_id,
+ user1_leave_response["event_id"],
+ ],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.JOIN},
+ sender=user1_id,
+ **shared_kwargs,
+ )
+ )
+ # We have to create our own context to get the state set correctly.
+ # If we use the `EventContext` from the `flawed_join_tuple`, the
+ # `current_state_events` table will only have the join event in it which
+ # should never happen in our real server.
+ join_event = flawed_join_tuple[0]
+ join_context = self.get_success(
+ self.state_handler.compute_event_context(
+ join_event,
+ state_ids_before_event={
+ (e.type, e.state_key): e.event_id
+ for e in [create_tuple[0], user1_leave_event]
+ },
+ partial_state=False,
)
)
+ self.get_success(self.persistence.persist_event(join_event, join_context))
+
+ # After the local user rejoins the remote room, it isn't forgotten anymore
self.assertFalse(
- self.get_success(self.store.is_locally_forgotten_room(self.room))
+ self.get_success(self.store.is_locally_forgotten_room(room_id))
)
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
new file mode 100644
index 0000000000..53212f7c45 --- /dev/null +++ b/tests/storage/test_sliding_sync_tables.py
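Before the new test file itself, it may help to hold the intended semantics of the two tables in mind: `sliding_sync_joined_rooms` keeps one live row per room (always tracking the latest `event_stream_ordering`, a `bump_stamp` that only bump-worthy events move, and current-state metadata such as the room name), while `sliding_sync_membership_snapshots` freezes that metadata per `(room_id, user_id)` as of the membership event. A minimal, self-contained sketch of those rules, as a toy model with invented helpers rather than the real schema or update logic:

from dataclasses import dataclass
from typing import Dict, Optional, Tuple

@dataclass
class JoinedRoomRow:
    event_stream_ordering: int       # position of the last event in the room
    bump_stamp: Optional[int]        # position of the last "bump" event
    room_name: Optional[str] = None  # current-state metadata

joined_rooms: Dict[str, JoinedRoomRow] = {}
snapshots: Dict[Tuple[str, str], Optional[str]] = {}  # (room, user) -> name at join

def on_event(room_id: str, stream: int, etype: str, content: dict,
             state_key: Optional[str] = None) -> None:
    row = joined_rooms.setdefault(room_id, JoinedRoomRow(stream, None))
    row.event_stream_ordering = stream      # every new event advances this
    if etype == "m.room.message":
        row.bump_stamp = stream             # only bump-worthy events move this
    elif etype == "m.room.name":
        row.room_name = content.get("name")
    elif etype == "m.room.member" and state_key is not None:
        # Freeze the room info as of this membership event; later state
        # changes must not rewrite the snapshot.
        snapshots[(room_id, state_key)] = row.room_name

on_event("!r:test", 1, "m.room.member", {"membership": "join"}, "@user2:test")
on_event("!r:test", 2, "m.room.name", {"name": "my super duper room"})
on_event("!r:test", 3, "m.room.member", {"membership": "join"}, "@user1:test")
on_event("!r:test", 4, "m.room.message", {"body": "hi"})
assert joined_rooms["!r:test"].event_stream_ordering == 4
assert joined_rooms["!r:test"].bump_stamp == 4
assert snapshots[("!r:test", "@user2:test")] is None  # joined before the rename
assert snapshots[("!r:test", "@user1:test")] == "my super duper room"

The tests below check the same invariants against the real tables, including the edge cases (backfill, outliers, state resets) where the toy model above is too simple.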
@@ -0,0 +1,5119 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+# Originally licensed under the Apache License, Version 2.0:
+# <http://www.apache.org/licenses/LICENSE-2.0>.
+#
+# [This file includes modifications made by New Vector Limited]
+#
+#
+import logging
+from typing import Dict, List, Optional, Tuple, cast
+
+import attr
+from parameterized import parameterized
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.constants import EventContentFields, EventTypes, Membership, RoomTypes
+from synapse.api.room_versions import RoomVersions
+from synapse.events import EventBase, StrippedStateEvent, make_event_from_dict
+from synapse.events.snapshot import EventContext
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.storage.databases.main.events import DeltaState
+from synapse.storage.databases.main.events_bg_updates import (
+ _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+ _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+)
+from synapse.types import create_requester
+from synapse.types.storage import _BackgroundUpdates
+from synapse.util import Clock
+
+from tests.test_utils.event_injection import create_event
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncJoinedRoomResult:
+ room_id: str
+ # `event_stream_ordering` is only optional to allow easier semantics when we make
+ # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+ # `event.internal_metadata.stream_ordering` is marked optional because it only
+ # exists for persisted events but in the context of these tests, we're only working
+ # with persisted events and we're making comparisons so we will find any mismatch.
+ event_stream_ordering: Optional[int]
+ bump_stamp: Optional[int]
+ room_type: Optional[str]
+ room_name: Optional[str]
+ is_encrypted: bool
+ tombstone_successor_room_id: Optional[str]
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _SlidingSyncMembershipSnapshotResult:
+ room_id: str
+ user_id: str
+ sender: str
+ membership_event_id: str
+ membership: str
+ # `event_stream_ordering` is only optional to allow easier semantics when we make
+ # expected objects from `event.internal_metadata.stream_ordering` in the tests.
+ # `event.internal_metadata.stream_ordering` is marked optional because it only
+ # exists for persisted events but in the context of these tests, we're only working
+ # with persisted events and we're making comparisons so we will find any mismatch.
+ event_stream_ordering: Optional[int]
+ has_known_state: bool
+ room_type: Optional[str]
+ room_name: Optional[str]
+ is_encrypted: bool
+ tombstone_successor_room_id: Optional[str]
+ # Make this default to "not forgotten" because it doesn't apply to many tests and we
+ # don't want to force all of the tests to deal with it.
+ forgotten: bool = False
+
+
+class SlidingSyncTablesTestCaseBase(HomeserverTestCase):
+ """
+ Helpers to deal with testing that the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+ populated correctly.
+ """
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ self.store = hs.get_datastores().main
+ self.storage_controllers = hs.get_storage_controllers()
+ persist_events_store = self.hs.get_datastores().persist_events
+ assert persist_events_store is not None
+ self.persist_events_store = persist_events_store
+
+ persist_controller = self.hs.get_storage_controllers().persistence
+ assert persist_controller is not None
+ self.persist_controller = persist_controller
+
+ self.state_handler = self.hs.get_state_handler()
+
+ def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]:
+ """
+ Return the rows from the `sliding_sync_joined_rooms` table.
+
+ Returns:
+ Mapping from room_id to _SlidingSyncJoinedRoomResult.
+ """
+ rows = cast(
+ List[Tuple[str, int, int, str, str, bool, str]],
+ self.get_success(
+ self.store.db_pool.simple_select_list(
+ "sliding_sync_joined_rooms",
+ None,
+ retcols=(
+ "room_id",
+ "event_stream_ordering",
+ "bump_stamp",
+ "room_type",
+ "room_name",
+ "is_encrypted",
+ "tombstone_successor_room_id",
+ ),
+ ),
+ ),
+ )
+
+ return {
+ row[0]: _SlidingSyncJoinedRoomResult(
+ room_id=row[0],
+ event_stream_ordering=row[1],
+ bump_stamp=row[2],
+ room_type=row[3],
+ room_name=row[4],
+ is_encrypted=bool(row[5]),
+ tombstone_successor_room_id=row[6],
+ )
+ for row in rows
+ }
+
+ def _get_sliding_sync_membership_snapshots(
+ self,
+ ) -> Dict[Tuple[str, str], _SlidingSyncMembershipSnapshotResult]:
+ """
+ Return the rows from the `sliding_sync_membership_snapshots` table.
+
+ Returns:
+ Mapping from (room_id, user_id) to _SlidingSyncMembershipSnapshotResult.
+ """
+ rows = cast(
+ List[Tuple[str, str, str, str, str, int, int, bool, str, str, bool, str]],
+ self.get_success(
+ self.store.db_pool.simple_select_list(
+ "sliding_sync_membership_snapshots",
+ None,
+ retcols=(
+ "room_id",
+ "user_id",
+ "sender",
+ "membership_event_id",
+ "membership",
+ "forgotten",
+ "event_stream_ordering",
+ "has_known_state",
+ "room_type",
+ "room_name",
+ "is_encrypted",
+ "tombstone_successor_room_id",
+ ),
+ ),
+ ),
+ )
+
+ return {
+ (row[0], row[1]): _SlidingSyncMembershipSnapshotResult(
+ room_id=row[0],
+ user_id=row[1],
+ sender=row[2],
+ membership_event_id=row[3],
+ membership=row[4],
+ forgotten=bool(row[5]),
+ event_stream_ordering=row[6],
+ has_known_state=bool(row[7]),
+ room_type=row[8],
+ room_name=row[9],
+ is_encrypted=bool(row[10]),
+ tombstone_successor_room_id=row[11],
+ )
+ for row in rows
+ }
+
+ _remote_invite_count: int = 0
+
+ def _create_remote_invite_room_for_user(
+ self,
+ invitee_user_id: str,
+ unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
+ ) -> Tuple[str, EventBase]:
+ """
+ Create a fake invite for a remote room and persist it.
+
+ We don't have any state for these kinds of rooms and can only rely on the
+ stripped state included in the unsigned portion of the invite event to identify
+ the room.
+
+ Args:
+ invitee_user_id: The person being invited
+ unsigned_invite_room_state: List of stripped state events to assist the
+ receiver in identifying the room.
+
+ Returns:
+ The room ID of the remote invite room and the persisted remote invite event.
+ """
+ invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
+
+ invite_event_dict = {
+ "room_id": invite_room_id,
+ "sender": "@inviter:remote_server",
+ "state_key": invitee_user_id,
+ "depth": 1,
+ "origin_server_ts": 1,
+ "type": EventTypes.Member,
+ "content": {"membership": Membership.INVITE},
+ "auth_events": [],
+ "prev_events": [],
+ }
+ if unsigned_invite_room_state is not None:
+ serialized_stripped_state_events = []
+ for stripped_event in unsigned_invite_room_state:
+ serialized_stripped_state_events.append(
+ {
+ "type": stripped_event.type,
+ "state_key": stripped_event.state_key,
+ "sender": stripped_event.sender,
+ "content": stripped_event.content,
+ }
+ )
+
+ invite_event_dict["unsigned"] = {
+ "invite_room_state": serialized_stripped_state_events
+ }
+
+ invite_event = make_event_from_dict(
+ invite_event_dict,
+ room_version=RoomVersions.V10,
+ )
+ invite_event.internal_metadata.outlier = True
+ invite_event.internal_metadata.out_of_band_membership = True
+
+ self.get_success(
+ self.store.maybe_store_room_on_outlier_membership(
+ room_id=invite_room_id, room_version=invite_event.room_version
+ )
+ )
+ context = EventContext.for_outlier(self.hs.get_storage_controllers())
+ persisted_event, _, _ = self.get_success(
+ self.persist_controller.persist_event(invite_event, context)
+ )
+
+ self._remote_invite_count += 1
+
+ return invite_room_id, persisted_event
+
+ def _retract_remote_invite_for_user(
+ self,
+ user_id: str,
+ remote_room_id: str,
+ ) -> EventBase:
+ """
+ Create a fake invite retraction for a remote room and persist it.
+
+ Retracting an invite just means the person is no longer invited to the room.
+ This is done by someone with proper power levels kicking the user from the room.
+ A kick shows up as a leave event for a given person with a different `sender`.
+
+ Args:
+ user_id: The person who was invited and we're going to retract the
+ invite for.
+ remote_room_id: The room ID that the invite was for.
+
+ Returns:
+ The persisted leave (kick) event.
+ """
+
+ kick_event_dict = {
+ "room_id": remote_room_id,
+ "sender": "@inviter:remote_server",
+ "state_key": user_id,
+ "depth": 1,
+ "origin_server_ts": 1,
+ "type": EventTypes.Member,
+ "content": {"membership": Membership.LEAVE},
+ "auth_events": [],
+ "prev_events": [],
+ }
+
+ kick_event = make_event_from_dict(
+ kick_event_dict,
+ room_version=RoomVersions.V10,
+ )
+ kick_event.internal_metadata.outlier = True
+ kick_event.internal_metadata.out_of_band_membership = True
+
+ self.get_success(
+ self.store.maybe_store_room_on_outlier_membership(
+ room_id=remote_room_id, room_version=kick_event.room_version
+ )
+ )
+ context = EventContext.for_outlier(self.hs.get_storage_controllers())
+ persisted_event, _, _ = self.get_success(
+ self.persist_controller.persist_event(kick_event, context)
+ )
+
+ return persisted_event
+
+
+class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase):
+ """
+ Tests to make sure the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` database tables are
+ populated and updated correctly as new events are sent.
+ """
+
+ def test_joined_room_with_no_info(self) -> None:
+ """
+ Test that a joined room that doesn't have a room type, encryption, or name
+ shows up in `sliding_sync_joined_rooms`.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # History visibility just happens to be the last event sent in the room + event_stream_ordering=state_map[ + (EventTypes.RoomHistoryVisibility, "") + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_room_with_info(self) -> None: + """ + Test joined encrypted room with name shows up in `sliding_sync_joined_rooms`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id1, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id1, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + # User1 joins the room + self.helper.join(room_id1, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # This should be whatever is the last event in the room + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + # Even though this room does have a name, is encrypted, and has a + # tombstone, user2 is the room creator and joined at the room creation + # time which didn't have this state set yet. + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_space_room_with_info(self) -> None: + """ + Test joined space room with name shows up in `sliding_sync_joined_rooms`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + + # User1 joins the room + user1_join_response = self.helper.join(space_room_id, user1_id, tok=user1_tok) + user1_join_event_pos = self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {space_room_id}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[space_room_id], + _SlidingSyncJoinedRoomResult( + room_id=space_room_id, + event_stream_ordering=user1_join_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (space_room_id, user1_id), + (space_room_id, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + # Even though this room does have a name, user2 is the room creator and + # joined at the room creation time which didn't have this state set yet. + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_room_with_state_updated(self) -> None: + """ + Test state derived info in `sliding_sync_joined_rooms` is updated when the + current state is updated. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + + # User1 joins the room + user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_join_event_pos = self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=user1_join_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + + # Update the room name + self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room was renamed"}, + tok=user2_tok, + ) + # Encrypt the room + encrypt_room_response = self.helper.send_state( + room_id1, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + encrypt_room_event_pos = self.get_success( + self.store.get_position_for_event(encrypt_room_response["event_id"]) + ) + + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + # Make sure we see the new room name + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=encrypt_room_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room was renamed", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user 
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ def test_joined_room_is_bumped(self) -> None:
+ """
+ Test that `event_stream_ordering` and `bump_stamp` are updated when a new bump
+ event is sent (`sliding_sync_joined_rooms`).
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+ user2_id = self.register_user("user2", "pass")
+ user2_tok = self.login(user2_id, "pass")
+
+ room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id1,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user2_tok,
+ )
+
+ # User1 joins the room
+ user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+ user1_join_event_pos = self.get_success(
+ self.store.get_position_for_event(user1_join_response["event_id"])
+ )
+
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id1)
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ event_stream_ordering=user1_join_event_pos.stream,
+ bump_stamp=state_map[
+ (EventTypes.Create, "")
+ ].internal_metadata.stream_ordering,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ # Holds the info according to the current state when the user joined
+ user1_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user1_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ user1_snapshot,
+ )
+ # Holds the info according to the current state when the user joined
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id1,
+ user_id=user2_id,
+ sender=user2_id,
+ membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=state_map[
+ (EventTypes.Member, user2_id)
+ ].internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ # Send a new message to bump the room
+ event_response = self.helper.send(room_id1, "some message", tok=user1_tok)
+ event_pos = self.get_success(
+ self.store.get_position_for_event(event_response["event_id"])
+ )
+
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id1},
+ exact=True,
+ )
+ # Make sure the room was bumped
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id1],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id1,
+ # Updated `event_stream_ordering`
+ event_stream_ordering=event_pos.stream,
+ # And since the event was a bump event, the `bump_stamp` should be updated
+ bump_stamp=event_pos.stream,
+ # The state is still the same (it didn't change)
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id1, user1_id),
+ (room_id1, user2_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user1_id)),
+ user1_snapshot,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id1, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_joined_room_bump_stamp_backfill(self) -> None:
+ """
+ Test that `bump_stamp` ignores backfilled events, i.e. events with a
+ negative stream ordering.
+ """
+ user1_id = self.register_user("user1", "pass")
+ _user1_tok = self.login(user1_id, "pass")
+
+ # Create a remote room
+ creator = "@user:other"
+ room_id = "!foo:other"
+ room_version = RoomVersions.V10
+ shared_kwargs = {
+ "room_id": room_id,
+ "room_version": room_version.identifier,
+ }
+
+ create_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[],
+ type=EventTypes.Create,
+ state_key="",
+ content={
+ # The `ROOM_CREATOR` field could be removed if we used a room
+ # version > 10 (in favor of relying on `sender`)
+ EventContentFields.ROOM_CREATOR: creator,
+ EventContentFields.ROOM_VERSION: room_version.identifier,
+ },
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ creator_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[create_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=creator,
+ content={"membership": Membership.JOIN},
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ room_name_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[creator_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+ type=EventTypes.Name,
+ state_key="",
+ content={
+ EventContentFields.ROOM_NAME: "my super duper room",
+ },
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ # We add a message event as a valid "bump type"
+ msg_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[room_name_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+ type=EventTypes.Message,
+ content={"body": "foo", "msgtype": "m.text"},
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+ invite_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[msg_tuple[0].event_id],
+ auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.INVITE},
+ sender=creator,
+ **shared_kwargs,
+ )
+ )
+
+ remote_events_and_contexts = [
+ create_tuple,
+ creator_tuple,
+ room_name_tuple,
+ msg_tuple,
+ invite_tuple,
+ ]
+
+ # Ensure the local HS knows the room version
+ self.get_success(self.store.store_room(room_id, creator, False, room_version))
+
+ # Persist these events as backfilled events.
+ for event, context in remote_events_and_contexts:
+ self.get_success(
+ self.persist_controller.persist_event(event, context, backfilled=True)
+ )
+
+ # Now we join the local user to the room. We want to make this feel as close to
+ # the real `process_remote_join()` as possible but we'd like to avoid some of
+ # the auth checks that would be done in the real code.
+ #
+ # FIXME: The test was originally written using this less-real
+ # `persist_event(...)` shortcut but it would be nice to use the real remote join
+ # process in a `FederatingHomeserverTestCase`.
+ flawed_join_tuple = self.get_success(
+ create_event(
+ self.hs,
+ prev_event_ids=[invite_tuple[0].event_id],
+ # This doesn't work correctly to create an `EventContext` that includes
+ # both of these state events. I assume it's because we're working on our
+ # local homeserver which has the remote state set as `outlier`. We have
+ # to create our own EventContext below to get this right.
+ auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id],
+ type=EventTypes.Member,
+ state_key=user1_id,
+ content={"membership": Membership.JOIN},
+ sender=user1_id,
+ **shared_kwargs,
+ )
+ )
+ # We have to create our own context to get the state set correctly. If we use
+ # the `EventContext` from the `flawed_join_tuple`, the `current_state_events`
+ # table will only have the join event in it which should never happen in our
+ # real server.
+ join_event = flawed_join_tuple[0]
+ join_context = self.get_success(
+ self.state_handler.compute_event_context(
+ join_event,
+ state_ids_before_event={
+ (e.type, e.state_key): e.event_id
+ for e in [create_tuple[0], invite_tuple[0], room_name_tuple[0]]
+ },
+ partial_state=False,
+ )
+ )
+ join_event, _join_event_pos, _room_token = self.get_success(
+ self.persist_controller.persist_event(join_event, join_context)
+ )
+
+ # Make sure the tables are populated correctly
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be the last event in the room (the join membership)
+ event_stream_ordering=join_event.internal_metadata.stream_ordering,
+ # Since all of the bump events are backfilled, the `bump_stamp` should
+ # still be `None`.
+ # (and we will fall back to the user's membership event position in the
+ # Sliding Sync API)
+ bump_stamp=None,
+ room_type=None,
+ # We still pick up the state of the room even if it's backfilled
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ },
+ exact=True,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ _SlidingSyncMembershipSnapshotResult(
+ room_id=room_id,
+ user_id=user1_id,
+ sender=user1_id,
+ membership_event_id=join_event.event_id,
+ membership=Membership.JOIN,
+ event_stream_ordering=join_event.internal_metadata.stream_ordering,
+ has_known_state=True,
+ room_type=None,
+ room_name="my super duper room",
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ @parameterized.expand(
+ # Test both an insert and an upsert into the
+ # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` to exercise
+ # more possibilities of things going wrong.
+ [
+ ("insert", True),
+ ("upsert", False),
+ ]
+ )
+ def test_joined_room_outlier_and_deoutlier(
+ self, description: str, should_insert: bool
+ ) -> None:
+ """
+ This is a regression test.
+
+ This is to simulate the case where an event is first persisted as an outlier
+ (like a remote invite) and then later persisted again to de-outlier it. The
+ first time, the outlier is persisted with one `stream_ordering` but when
+ persisted again and de-outliered, it is assigned a different `stream_ordering`
+ that won't end up being used. Since we call
+ `_calculate_sliding_sync_table_changes()` before `_update_outliers_txn()`
+ (which fixes this discrepancy by always using the `stream_ordering` from the
+ first time the event was persisted), we need to make sure we're not using
+ unreliable `stream_ordering` values that will cause `FOREIGN KEY constraint
+ failed` errors in the
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` tables.
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_version = RoomVersions.V10 + room_id = self.helper.create_room_as( + user2_id, tok=user2_tok, room_version=room_version.identifier + ) + + if should_insert: + # Clear these out so we always insert + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + desc="TODO", + ) + ) + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_membership_snapshots", + keyvalues={"room_id": room_id}, + desc="TODO", + ) + ) + + # Create a membership event (which triggers an insert into + # `sliding_sync_membership_snapshots`) + membership_event_dict = { + "type": EventTypes.Member, + "state_key": user1_id, + "sender": user1_id, + "room_id": room_id, + "content": {EventContentFields.MEMBERSHIP: Membership.JOIN}, + } + # Create a relevant state event (which triggers an insert into + # `sliding_sync_joined_rooms`) + state_event_dict = { + "type": EventTypes.Name, + "state_key": "", + "sender": user2_id, + "room_id": room_id, + "content": {EventContentFields.ROOM_NAME: "my super room"}, + } + event_dicts_to_persist = [ + membership_event_dict, + state_event_dict, + ] + + for event_dict in event_dicts_to_persist: + events_to_persist = [] + + # Create the events as an outliers + ( + event, + unpersisted_context, + ) = self.get_success( + self.hs.get_event_creation_handler().create_event( + requester=create_requester(user1_id), + event_dict=event_dict, + outlier=True, + ) + ) + # FIXME: Should we use an `EventContext.for_outlier(...)` here? + # Doesn't seem to matter for this test. + context = self.get_success(unpersisted_context.persist(event)) + events_to_persist.append((event, context)) + + # Create the event again but as an non-outlier. This will de-outlier the event + # when we persist it. + ( + event, + unpersisted_context, + ) = self.get_success( + self.hs.get_event_creation_handler().create_event( + requester=create_requester(user1_id), + event_dict=event_dict, + outlier=False, + ) + ) + context = self.get_success(unpersisted_context.persist(event)) + events_to_persist.append((event, context)) + + for event, context in events_to_persist: + self.get_success( + self.persist_controller.persist_event( + event, + context, + ) + ) + + # We're just testing that it does not explode + + def test_joined_room_meta_state_reset(self) -> None: + """ + Test that a state reset on the room name is reflected in the + `sliding_sync_joined_rooms` table. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Make sure we see the new room name + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be whatever is the last event in the room + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + user1_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined (no room + # name when the room creator joined) + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + # Mock a state reset removing the room name state from the current state + message_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[state_map[(EventTypes.Name, "")].event_id], + auth_event_ids=[ + state_map[(EventTypes.Create, "")].event_id, + state_map[(EventTypes.Member, user1_id)].event_id, + ], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=user1_id, + room_id=room_id, + room_version=RoomVersions.V10.identifier, + ) + ) + event_chunk = [message_tuple] + self.get_success( + self.persist_events_store._persist_events_and_state_updates( + room_id, + event_chunk, + state_delta_for_room=DeltaState( + # This is the state reset part. We're removing the room name state. 
+ to_delete=[(EventTypes.Name, "")],
+ to_insert={},
+ ),
+ new_forward_extremities={message_tuple[0].event_id},
+ use_negative_stream_ordering=False,
+ inhibit_local_membership_updates=False,
+ new_event_links={},
+ )
+ )
+
+ # Make sure the state reset is reflected in the `sliding_sync_joined_rooms` table
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ {room_id},
+ exact=True,
+ )
+ state_map = self.get_success(
+ self.storage_controllers.state.get_current_state(room_id)
+ )
+ self.assertEqual(
+ sliding_sync_joined_rooms_results[room_id],
+ _SlidingSyncJoinedRoomResult(
+ room_id=room_id,
+ # This should be whatever is the last event in the room
+ event_stream_ordering=message_tuple[
+ 0
+ ].internal_metadata.stream_ordering,
+ bump_stamp=message_tuple[0].internal_metadata.stream_ordering,
+ room_type=None,
+ # This was state reset back to None
+ room_name=None,
+ is_encrypted=False,
+ tombstone_successor_room_id=None,
+ ),
+ )
+
+ # State reset shouldn't be reflected in the `sliding_sync_membership_snapshots`
+ sliding_sync_membership_snapshots_results = (
+ self._get_sliding_sync_membership_snapshots()
+ )
+ self.assertIncludes(
+ set(sliding_sync_membership_snapshots_results.keys()),
+ {
+ (room_id, user1_id),
+ (room_id, user2_id),
+ },
+ exact=True,
+ )
+ # Snapshots haven't changed
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+ user1_snapshot,
+ )
+ self.assertEqual(
+ sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+ user2_snapshot,
+ )
+
+ def test_joined_room_fully_insert_on_state_update(self) -> None:
+ """
+ Test that when an existing room updates its state and we don't have a
+ corresponding row in `sliding_sync_joined_rooms` yet, we fully-insert the row
+ even though only a tiny piece of state changed.
+
+ FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+ foreground update for
+ `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+ https://github.com/element-hq/synapse/issues/17623)
+ """
+ user1_id = self.register_user("user1", "pass")
+ user1_tok = self.login(user1_id, "pass")
+
+ room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+ # Add a room name
+ self.helper.send_state(
+ room_id,
+ EventTypes.Name,
+ {"name": "my super duper room"},
+ tok=user1_tok,
+ )
+
+ # Clean up the `sliding_sync_joined_rooms` table as if the room never made
+ # it into the table. This is to simulate an existing room (before we even added
+ # the sliding sync tables) not being in the `sliding_sync_joined_rooms` table
+ # yet.
+ self.get_success(
+ self.store.db_pool.simple_delete(
+ table="sliding_sync_joined_rooms",
+ keyvalues={"room_id": room_id},
+ desc="simulate existing room not being in the sliding_sync_joined_rooms table yet",
+ )
+ )
+
+ # We shouldn't find anything in the table because we just deleted them in
+ # preparation for the test.
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+ self.assertIncludes(
+ set(sliding_sync_joined_rooms_results.keys()),
+ set(),
+ exact=True,
+ )
+
+ # Encrypt the room
+ self.helper.send_state(
+ room_id,
+ EventTypes.RoomEncryption,
+ {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"},
+ tok=user1_tok,
+ )
+
+ # The room should now be in the `sliding_sync_joined_rooms` table
+ # (fully-inserted with all of the state values).
+ sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be whatever is the last event in the room + event_stream_ordering=state_map[ + (EventTypes.RoomEncryption, "") + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + def test_joined_room_nothing_if_not_in_table_when_bumped(self) -> None: + """ + Test a new message being sent in an existing room when we don't have a + corresponding row in `sliding_sync_joined_rooms` yet; either nothing should + happen or we should fully-insert the row. We currently do nothing. + + FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the + foreground update for + `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by + https://github.com/element-hq/synapse/issues/17623) + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # Add a room name + self.helper.send_state( + room_id, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user1_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + # Clean-up the `sliding_sync_joined_rooms` table as if the room never made + # it into the table. This is to simulate an existing room (before we even added + # the sliding sync tables) not being in the `sliding_sync_joined_rooms` table + # yet. + self.get_success( + self.store.db_pool.simple_delete( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + desc="simulate existing room not being in the sliding_sync_joined_rooms table yet", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + # Send a new message to bump the room + self.helper.send(room_id, "some message", tok=user1_tok) + + # Either nothing should happen or we should fully-insert the row. We currently + # do nothing for non-state events. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + def test_non_join_space_room_with_info(self) -> None: + """ + Test that a user who was invited shows up in `sliding_sync_membership_snapshots`. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + space_room_id = self.helper.create_room_as( + user2_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + space_room_id, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + space_room_id, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + # User1 is invited to the room + user1_invited_response = self.helper.invite( + space_room_id, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invited_event_pos = self.get_success( + self.store.get_position_for_event(user1_invited_response["event_id"]) + ) + + # Update the room name after we are invited just to make sure + # we don't update non-join memberships when the room name changes. + rename_response = self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space was renamed"}, + tok=user2_tok, + ) + rename_event_pos = self.get_success( + self.store.get_position_for_event(rename_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + + # User2 is still joined to the room so we should still have an entry in the + # `sliding_sync_joined_rooms` table. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {space_room_id}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[space_room_id], + _SlidingSyncJoinedRoomResult( + room_id=space_room_id, + event_stream_ordering=rename_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=RoomTypes.SPACE, + room_name="my super duper space was renamed", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (space_room_id, user1_id), + (space_room_id, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was invited + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_invited_response["event_id"], + membership=Membership.INVITE, + event_stream_ordering=user1_invited_event_pos.stream, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + 
membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_invite_ban(self) -> None: + """ + Test that users who have an invite/ban membership in a room show up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to the room + user1_invited_response = self.helper.invite( + room_id1, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invited_event_pos = self.get_success( + self.store.get_position_for_event(user1_invited_response["event_id"]) + ) + + # User3 joins the room + self.helper.join(room_id1, user3_id, tok=user3_tok) + # User3 is banned from the room + user3_ban_response = self.helper.ban( + room_id1, src=user2_id, targ=user3_id, tok=user2_tok + ) + user3_ban_event_pos = self.get_success( + self.store.get_position_for_event(user3_ban_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # User2 is still joined to the room so we should still have an entry + # in the `sliding_sync_joined_rooms` table. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + event_stream_ordering=user3_ban_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + (room_id1, user3_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was invited + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_invited_response["event_id"], + membership=Membership.INVITE, + event_stream_ordering=user1_invited_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # 
Holds the info according to the current state when the user was banned + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user3_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user3_id, + sender=user2_id, + membership_event_id=user3_ban_response["event_id"], + membership=Membership.BAN, + event_stream_ordering=user3_ban_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_reject_invite_empty_room(self) -> None: + """ + In a room where no one is joined (`no_longer_in_room`), test rejecting an invite. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # User2 leaves the room + user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok) + user2_leave_event_pos = self.get_success( + self.store.get_position_for_event(user2_leave_response["event_id"]) + ) + + # User1 rejects the invite + user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + user1_leave_event_pos = self.get_success( + self.store.get_position_for_event(user1_leave_response["event_id"]) + ) + + # No one is joined to the room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user left + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=user1_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user1_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user left + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=user2_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user2_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_changing(self) -> None: + """ + Test that the latest snapshot evolves when membership changes (`sliding_sync_membership_snapshots`). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 is invited to the room + # ====================================================== + user1_invited_response = self.helper.invite( + room_id1, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invited_event_pos = self.get_success( + self.store.get_position_for_event(user1_invited_response["event_id"]) + ) + + # Update the room name after the user was invited + room_name_update_response = self.helper.send_state( + room_id1, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + room_name_update_event_pos = self.get_success( + self.store.get_position_for_event(room_name_update_response["event_id"]) + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Assert joined room status + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # Latest event in the room + event_stream_ordering=room_name_update_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + # Assert membership snapshots + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was invited + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_invited_response["event_id"], + membership=Membership.INVITE, + event_stream_ordering=user1_invited_event_pos.stream, + has_known_state=True, + room_type=None, + # Room name was updated after the user was invited so we should still + # see it unset here + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + user2_snapshot, + ) + + # User1 joins the room + # ====================================================== + user1_joined_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_joined_event_pos = self.get_success( + self.store.get_position_for_event(user1_joined_response["event_id"]) + ) + + # Assert joined room status + sliding_sync_joined_rooms_results = 
self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # Latest event in the room + event_stream_ordering=user1_joined_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + # Assert membership snapshots + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=user1_joined_response["event_id"], + membership=Membership.JOIN, + event_stream_ordering=user1_joined_event_pos.stream, + has_known_state=True, + room_type=None, + # We see the updated state because the user joined after the room name + # change + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + user2_snapshot, + ) + + # User1 is banned from the room + # ====================================================== + user1_ban_response = self.helper.ban( + room_id1, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_ban_event_pos = self.get_success( + self.store.get_position_for_event(user1_ban_response["event_id"]) + ) + + # Assert joined room status + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id1}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id1], + _SlidingSyncJoinedRoomResult( + room_id=room_id1, + # Latest event in the room + event_stream_ordering=user1_ban_event_pos.stream, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + # Assert membership snapshots + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user was banned + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user2_id, + membership_event_id=user1_ban_response["event_id"], + membership=Membership.BAN, + event_stream_ordering=user1_ban_event_pos.stream, + has_known_state=True, + room_type=None, + # We see the updated state because the user was banned after the room name + # change + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + # Holds the info according to the current state 
when the user joined + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + user2_snapshot, + ) + + def test_non_join_server_left_room(self) -> None: + """ + Test that when everyone local leaves the room, their leave memberships still + show up in `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id1, user1_id, tok=user1_tok) + + # User2 leaves the room + user2_leave_response = self.helper.leave(room_id1, user2_id, tok=user2_tok) + user2_leave_event_pos = self.get_success( + self.store.get_position_for_event(user2_leave_response["event_id"]) + ) + + # User1 leaves the room + user1_leave_response = self.helper.leave(room_id1, user1_id, tok=user1_tok) + user1_leave_event_pos = self.get_success( + self.store.get_position_for_event(user1_leave_response["event_id"]) + ) + + # No one is joined to the room anymore so we shouldn't have an entry in the + # `sliding_sync_joined_rooms` table. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + # We should still see rows for the leave events (non-joins) + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id1, user1_id), + (room_id1, user2_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user1_id, + sender=user1_id, + membership_event_id=user1_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user1_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id1, user2_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id1, + user_id=user2_id, + sender=user2_id, + membership_event_id=user2_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user2_leave_event_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + @parameterized.expand( + [ + # No stripped state provided + ("none", None), + # Empty stripped state provided + ("empty", []), + ] + ) + def test_non_join_remote_invite_no_stripped_state( + self, _description: str, stripped_state: Optional[List[StrippedStateEvent]] + ) -> None: + """ + Test that a remote invite with no stripped state provided shows up in + `sliding_sync_membership_snapshots` with `has_known_state=False`. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room without any `unsigned.invite_room_state` + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user(user1_id, stripped_state) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + # No stripped state provided + has_known_state=False, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_remote_invite_unencrypted_room(self) -> None: + """ + Test remote invite with stripped state (unencrypted room) shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + ), + ], + ) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_remote_invite_encrypted_room(self) -> None: + """ + Test remote invite with stripped state (encrypted room) shows up in + `sliding_sync_membership_snapshots`. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + # This is not one of the stripped state events according to the state + # but we still handle it. + StrippedStateEvent( + type=EventTypes.Tombstone, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room", + }, + ), + # Also test a random event that we don't care about + StrippedStateEvent( + type="org.matrix.foo_state", + state_key="", + sender="@inviter:remote_server", + content={ + "foo": "qux", + }, + ), + ], + ) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + + def test_non_join_remote_invite_space_room(self) -> None: + """ + Test remote invite with stripped state (encrypted space room with name) shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. 
+ remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + # Specify that it is a space room + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper space", + }, + ), + ], + ) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=remote_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_reject_remote_invite(self) -> None: + """ + Test that a rejected remote invite (the user decided to leave the room) inherits + metadata from the remote invite's stripped state and shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. 
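+ # (Note: the local server is never joined to this room, so the stripped state + # on the invite is the only room metadata available; the assertions below check + # that the leave snapshot inherits `is_encrypted=True` from it.)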
+ remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + ) + + # User1 decides to leave the room (reject the invite) + user1_leave_response = self.helper.leave( + remote_invite_room_id, user1_id, tok=user1_tok + ) + user1_leave_pos = self.get_success( + self.store.get_position_for_event(user1_leave_response["event_id"]) + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=user1_leave_response["event_id"], + membership=Membership.LEAVE, + event_stream_ordering=user1_leave_pos.stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_retracted_remote_invite(self) -> None: + """ + Test that a retracted remote invite (the remote inviter kicks the person who was + invited) inherits metadata from the remote invite's stripped state and shows up in + `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote invite room with some `unsigned.invite_room_state` + # indicating that the room is encrypted. + remote_invite_room_id, remote_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + ) + + # `@inviter:remote_server` decides to retract the invite (kicks the user). 
+ # (Note: A kick is just a leave event with a different sender) + remote_invite_retraction_event = self._retract_remote_invite_for_user( + user_id=user1_id, + remote_room_id=remote_invite_room_id, + ) + + # No one local is joined to the remote room + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (remote_invite_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (remote_invite_room_id, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=remote_invite_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=remote_invite_retraction_event.event_id, + membership=Membership.LEAVE, + event_stream_ordering=remote_invite_retraction_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + + def test_non_join_state_reset(self) -> None: + """ + Test a state reset that removes someone from the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Make sure we see the new room name + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be whatever is the last event in the room + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + user1_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined (no room + # name when the room creator joined) 
+ user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + # Mock a state reset removing the membership for user1 in the current state + message_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[state_map[(EventTypes.Name, "")].event_id], + auth_event_ids=[ + state_map[(EventTypes.Create, "")].event_id, + state_map[(EventTypes.Member, user1_id)].event_id, + ], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=user1_id, + room_id=room_id, + room_version=RoomVersions.V10.identifier, + ) + ) + event_chunk = [message_tuple] + self.get_success( + self.persist_events_store._persist_events_and_state_updates( + room_id, + event_chunk, + state_delta_for_room=DeltaState( + # This is the state reset part. We're removing user1's membership + # from the current state. + to_delete=[(EventTypes.Member, user1_id)], + to_insert={}, + ), + new_forward_extremities={message_tuple[0].event_id}, + use_negative_stream_ordering=False, + inhibit_local_membership_updates=False, + new_event_links={}, + ) + ) + + # State reset on membership doesn't affect the `sliding_sync_joined_rooms` table + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be whatever is the last event in the room + event_stream_ordering=message_tuple[ + 0 + ].internal_metadata.stream_ordering, + bump_stamp=message_tuple[0].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + # State reset on membership should remove the user's snapshot + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + # We shouldn't see user1 in the snapshots table anymore + (room_id, user2_id), + }, + exact=True, + ) + # Snapshot for user2 hasn't changed + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + def test_membership_snapshot_forget(self) -> None: + """ + Test that forgetting a room updates `sliding_sync_membership_snapshots` + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + # User1 leaves the room (we have to leave in order to forget the room) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + + # Check on the 
`sliding_sync_membership_snapshots` table (nothing should be + # forgotten yet) + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + # Holds the info according to the current state when the user left + user1_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.LEAVE, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + # Room is not forgotten + forgotten=False, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + user1_snapshot, + ) + # Holds the info according to the current state when the user joined + user2_snapshot = _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user2_id, + sender=user2_id, + membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user2_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + # Forget the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Check on the `sliding_sync_membership_snapshots` table + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + # Room is now forgotten for user1 + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + attr.evolve(user1_snapshot, forgotten=True), + ) + # Nothing changed for user2 + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user2_id)), + user2_snapshot, + ) + + def test_membership_snapshot_missing_forget( + self, + ) -> None: + """ + Test forgetting a room with no existing row in `sliding_sync_membership_snapshots`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + # User1 leaves the room (we have to leave in order to forget the room) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. 
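+ # (Note: the delete below simulates membership rows that pre-date the sliding + # sync tables; the point of the test is that the forget path tolerates a + # missing snapshot row instead of erroring out.)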
+ self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id,), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Forget the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # It doesn't explode + + # We still shouldn't find anything in the table because nothing has re-created them + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + +class SlidingSyncTablesBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase): + """ + Test the background updates that populate the `sliding_sync_joined_rooms` and + `sliding_sync_membership_snapshots` tables. + """ + + def test_joined_background_update_missing(self) -> None: + """ + Test that the background update for `sliding_sync_joined_rooms` populates missing rows + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok) + + room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user1_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user1_tok, + ) + + # Clean-up the `sliding_sync_joined_rooms` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_joined_rooms", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_joined_rooms.test_joined_background_update_missing", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background updates. 
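+ # (A minimal sketch of the pattern used throughout these background update tests: + # insert a row into the `background_updates` table, e.g. + # self.get_success( + # self.store.db_pool.simple_insert( + # "background_updates", + # {"update_name": "...", "progress_json": "{}"}, + # ) + # ) + # then mark the updater as not-done and wait for it to drain: + # self.store.db_pool.updates._all_done = False + # self.wait_for_background_updates() + # Setting `depends_on` on a row makes that update wait for another, which is + # how the prefill update below is ordered before the joined-rooms update.)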
+ self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_JOINED_ROOMS_BG_UPDATE, + "progress_json": "{}", + "depends_on": _BackgroundUpdates.SLIDING_SYNC_PREFILL_JOINED_ROOMS_TO_RECALCULATE_TABLE_BG_UPDATE, + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id_no_info, room_id_with_info, space_room_id}, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id_no_info) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id_no_info], + _SlidingSyncJoinedRoomResult( + room_id=room_id_no_info, + # History visibility just happens to be the last event sent in the room + event_stream_ordering=state_map[ + (EventTypes.RoomHistoryVisibility, "") + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id_with_info) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id_with_info], + _SlidingSyncJoinedRoomResult( + room_id=room_id_with_info, + # Latest event sent in the room + event_stream_ordering=state_map[ + (EventTypes.RoomEncryption, "") + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + self.assertEqual( + sliding_sync_joined_rooms_results[space_room_id], + _SlidingSyncJoinedRoomResult( + room_id=space_room_id, + # Latest event sent in the room + event_stream_ordering=state_map[ + (EventTypes.Name, "") + ].internal_metadata.stream_ordering, + bump_stamp=state_map[ + (EventTypes.Create, "") + ].internal_metadata.stream_ordering, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_joined(self) -> None: + """ + Test that the background update for `sliding_sync_membership_snapshots` + populates missing rows for join memberships. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user1_id, tok=user1_tok) + + room_id_with_info = self.helper.create_room_as(user1_id, tok=user1_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user1_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user1_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id_with_info, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user1_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user1_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user1_tok, + ) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_joined", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+ self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id_no_info, user1_id), + (room_id_with_info, user1_id), + (space_room_id, user1_id), + }, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id_no_info) + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_no_info, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id_with_info) + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_with_info, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_with_info, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(space_room_id) + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_local_invite(self) -> None: + """ + Test that the background update for `sliding_sync_membership_snapshots` + populates missing rows for invite memberships. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok) + + room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id_with_info, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + + # Invite user1 to the rooms + user1_invite_room_id_no_info_response = self.helper.invite( + room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invite_room_id_with_info_response = self.helper.invite( + room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_invite_space_room_id_response = self.helper.invite( + space_room_id, src=user2_id, targ=user1_id, tok=user2_tok + ) + + # Have user2 leave the rooms to make sure that our background update is not just + # reading from `current_state_events`. For invite/knock memberships, we should + # be reading from the stripped state on the invite/knock event itself. + self.helper.leave(room_id_no_info, user2_id, tok=user2_tok) + self.helper.leave(room_id_with_info, user2_id, tok=user2_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + # Check to make sure we actually don't have any `current_state_events` for the rooms + current_state_check_rows = self.get_success( + self.store.db_pool.simple_select_many_batch( + table="current_state_events", + column="room_id", + iterable=[room_id_no_info, room_id_with_info, space_room_id], + retcols=("event_id",), + keyvalues={}, + desc="check current_state_events in test", + ) + ) + self.assertEqual(len(current_state_check_rows), 0) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_local_invite", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The invite memberships for user1
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+                # The leave memberships for user2
+                (room_id_no_info, user2_id),
+                (room_id_with_info, user2_id),
+                (space_room_id, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invite_room_id_no_info_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_invite_room_id_no_info_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invite_room_id_with_info_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_invite_room_id_with_info_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                # The tombstone isn't showing here ("another_room") because it's not one
+                # of the stripped events that we hand out as part of the invite event.
+                # Even though we handle this scenario for invites from other remote
+                # homeservers, Synapse does not include the tombstone in the invite
+                # event.
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender=user2_id,
+                membership_event_id=user1_invite_space_room_id_response["event_id"],
+                membership=Membership.INVITE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        user1_invite_space_room_id_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    def test_membership_snapshots_background_update_remote_invite(
+        self,
+    ) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for remote invites (out-of-band memberships).
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_unknown_state, room_id_unknown_state_invite_event = ( + self._create_remote_invite_room_for_user(user1_id, None) + ) + + room_id_no_info, room_id_no_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + ], + ) + ) + + room_id_with_info, room_id_with_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + ) + + space_room_id, space_room_id_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper space", + }, + ), + ], + ) + ) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=( + room_id_unknown_state, + room_id_no_info, + room_id_with_info, + space_room_id, + ), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+ self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + # The invite memberships for user1 + (room_id_unknown_state, user1_id), + (room_id_no_info, user1_id), + (room_id_with_info, user1_id), + (space_room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_unknown_state, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_unknown_state, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=room_id_unknown_state_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=room_id_unknown_state_invite_event.internal_metadata.stream_ordering, + has_known_state=False, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_no_info, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=room_id_no_info_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=room_id_no_info_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_with_info, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_with_info, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=room_id_with_info_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=room_id_with_info_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + sender="@inviter:remote_server", + membership_event_id=space_room_id_invite_event.event_id, + membership=Membership.INVITE, + event_stream_ordering=space_room_id_invite_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_remote_invite_rejections_and_retractions( + self, + ) -> None: + """ + Test that the background update for `sliding_sync_membership_snapshots` + populates missing rows for remote invite rejections/retractions (out-of-band memberships). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_unknown_state, room_id_unknown_state_invite_event = ( + self._create_remote_invite_room_for_user(user1_id, None) + ) + + room_id_no_info, room_id_no_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + ], + ) + ) + + room_id_with_info, room_id_with_info_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + ), + StrippedStateEvent( + type=EventTypes.RoomEncryption, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2", + }, + ), + ], + ) + ) + + space_room_id, space_room_id_invite_event = ( + self._create_remote_invite_room_for_user( + user1_id, + [ + StrippedStateEvent( + type=EventTypes.Create, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_CREATOR: "@inviter:remote_server", + EventContentFields.ROOM_VERSION: RoomVersions.V10.identifier, + EventContentFields.ROOM_TYPE: RoomTypes.SPACE, + }, + ), + StrippedStateEvent( + type=EventTypes.Name, + state_key="", + sender="@inviter:remote_server", + content={ + EventContentFields.ROOM_NAME: "my super duper space", + }, + ), + ], + ) + ) + + # Reject the remote invites. + # Also try retracting a remote invite. + room_id_unknown_state_leave_event_response = self.helper.leave( + room_id_unknown_state, user1_id, tok=user1_tok + ) + room_id_no_info_leave_event = self._retract_remote_invite_for_user( + user_id=user1_id, + remote_room_id=room_id_no_info, + ) + room_id_with_info_leave_event_response = self.helper.leave( + room_id_with_info, user1_id, tok=user1_tok + ) + space_room_id_leave_event = self._retract_remote_invite_for_user( + user_id=user1_id, + remote_room_id=space_room_id, + ) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=( + room_id_unknown_state, + room_id_no_info, + room_id_with_info, + space_room_id, + ), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_remote_invite_rejections_and_retractions", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                # The invite memberships for user1
+                (room_id_unknown_state, user1_id),
+                (room_id_no_info, user1_id),
+                (room_id_with_info, user1_id),
+                (space_room_id, user1_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_unknown_state, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_unknown_state,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=room_id_unknown_state_leave_event_response[
+                    "event_id"
+                ],
+                membership=Membership.LEAVE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        room_id_unknown_state_leave_event_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=False,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_no_info,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=room_id_no_info_leave_event.event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=room_id_no_info_leave_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get(
+                (room_id_with_info, user1_id)
+            ),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id_with_info,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=room_id_with_info_leave_event_response["event_id"],
+                membership=Membership.LEAVE,
+                event_stream_ordering=self.get_success(
+                    self.store.get_position_for_event(
+                        room_id_with_info_leave_event_response["event_id"]
+                    )
+                ).stream,
+                has_known_state=True,
+                room_type=None,
+                room_name="my super duper room",
+                is_encrypted=True,
+                tombstone_successor_room_id=None,
+            ),
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=space_room_id,
+                user_id=user1_id,
+                sender="@inviter:remote_server",
+                membership_event_id=space_room_id_leave_event.event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=space_room_id_leave_event.internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=RoomTypes.SPACE,
+                room_name="my super duper space",
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+    @parameterized.expand(
+        [
+            # We'll do a kick for this
+            (Membership.LEAVE,),
+            (Membership.BAN,),
+        ]
+    )
+    def test_membership_snapshots_background_update_historical_state(
+        self, test_membership: str
+    ) -> None:
+        """
+        Test that the background update for `sliding_sync_membership_snapshots`
+        populates missing rows for kick (leave) and ban memberships.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create rooms with various levels of state that should appear in the table + # + room_id_no_info = self.helper.create_room_as(user2_id, tok=user2_tok) + + room_id_with_info = self.helper.create_room_as(user2_id, tok=user2_tok) + # Add a room name + self.helper.send_state( + room_id_with_info, + EventTypes.Name, + {"name": "my super duper room"}, + tok=user2_tok, + ) + # Encrypt the room + self.helper.send_state( + room_id_with_info, + EventTypes.RoomEncryption, + {EventContentFields.ENCRYPTION_ALGORITHM: "m.megolm.v1.aes-sha2"}, + tok=user2_tok, + ) + # Add a tombstone + self.helper.send_state( + room_id_with_info, + EventTypes.Tombstone, + {EventContentFields.TOMBSTONE_SUCCESSOR_ROOM: "another_room"}, + tok=user2_tok, + ) + + space_room_id = self.helper.create_room_as( + user1_id, + tok=user2_tok, + extra_content={ + "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE} + }, + ) + # Add a room name + self.helper.send_state( + space_room_id, + EventTypes.Name, + {"name": "my super duper space"}, + tok=user2_tok, + ) + + # Join the room in preparation for our test_membership + self.helper.join(room_id_no_info, user1_id, tok=user1_tok) + self.helper.join(room_id_with_info, user1_id, tok=user1_tok) + self.helper.join(space_room_id, user1_id, tok=user1_tok) + + if test_membership == Membership.LEAVE: + # Kick user1 from the rooms + user1_membership_room_id_no_info_response = self.helper.change_membership( + room=room_id_no_info, + src=user2_id, + targ=user1_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + user1_membership_room_id_with_info_response = self.helper.change_membership( + room=room_id_with_info, + src=user2_id, + targ=user1_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + user1_membership_space_room_id_response = self.helper.change_membership( + room=space_room_id, + src=user2_id, + targ=user1_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + elif test_membership == Membership.BAN: + # Ban user1 from the rooms + user1_membership_room_id_no_info_response = self.helper.ban( + room_id_no_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_membership_room_id_with_info_response = self.helper.ban( + room_id_with_info, src=user2_id, targ=user1_id, tok=user2_tok + ) + user1_membership_space_room_id_response = self.helper.ban( + space_room_id, src=user2_id, targ=user1_id, tok=user2_tok + ) + else: + raise AssertionError("Unknown test_membership") + + # Have user2 leave the rooms to make sure that our background update is not just + # reading from `current_state_events`. For leave memberships, we should be + # reading from the historical state. 
+ self.helper.leave(room_id_no_info, user2_id, tok=user2_tok) + self.helper.leave(room_id_with_info, user2_id, tok=user2_tok) + self.helper.leave(space_room_id, user2_id, tok=user2_tok) + # Check to make sure we actually don't have any `current_state_events` for the rooms + current_state_check_rows = self.get_success( + self.store.db_pool.simple_select_many_batch( + table="current_state_events", + column="room_id", + iterable=[room_id_no_info, room_id_with_info, space_room_id], + retcols=("event_id",), + keyvalues={}, + desc="check current_state_events in test", + ) + ) + self.assertEqual(len(current_state_check_rows), 0) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id_no_info, room_id_with_info, space_room_id), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_historical_state", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + # The memberships for user1 + (room_id_no_info, user1_id), + (room_id_with_info, user1_id), + (space_room_id, user1_id), + # The leave memberships for user2 + (room_id_no_info, user2_id), + (room_id_with_info, user2_id), + (space_room_id, user2_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id_no_info, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_no_info, + user_id=user1_id, + # Because user2 kicked/banned user1 from the room + sender=user2_id, + membership_event_id=user1_membership_room_id_no_info_response[ + "event_id" + ], + membership=test_membership, + event_stream_ordering=self.get_success( + self.store.get_position_for_event( + user1_membership_room_id_no_info_response["event_id"] + ) + ).stream, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get( + (room_id_with_info, user1_id) + ), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id_with_info, + user_id=user1_id, + # Because user2 kicked/banned user1 from the room + sender=user2_id, + membership_event_id=user1_membership_room_id_with_info_response[ + "event_id" + ], + membership=test_membership, + event_stream_ordering=self.get_success( + self.store.get_position_for_event( + user1_membership_room_id_with_info_response["event_id"] + ) + ).stream, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=True, + tombstone_successor_room_id="another_room", + ), + ) + 
self.assertEqual( + sliding_sync_membership_snapshots_results.get((space_room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=space_room_id, + user_id=user1_id, + # Because user2 kicked/banned user1 from the room + sender=user2_id, + membership_event_id=user1_membership_space_room_id_response["event_id"], + membership=test_membership, + event_stream_ordering=self.get_success( + self.store.get_position_for_event( + user1_membership_space_room_id_response["event_id"] + ) + ).stream, + has_known_state=True, + room_type=RoomTypes.SPACE, + room_name="my super duper space", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + def test_membership_snapshots_background_update_forgotten_missing(self) -> None: + """ + Test that a new row is inserted into `sliding_sync_membership_snapshots` when it + doesn't exist in the table yet. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + # User1 leaves the room (we have to leave in order to forget the room) + self.helper.leave(room_id, user1_id, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + + # Forget the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Clean-up the `sliding_sync_membership_snapshots` table as if the inserts did not + # happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id,), + keyvalues={}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_background_update_forgotten_missing", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Insert and run the background update. 
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id,
+                user_id=user1_id,
+                sender=user1_id,
+                membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id,
+                membership=Membership.LEAVE,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user1_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+                # Room is forgotten
+                forgotten=True,
+            ),
+        )
+        # Holds the info according to the current state when the user joined
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            _SlidingSyncMembershipSnapshotResult(
+                room_id=room_id,
+                user_id=user2_id,
+                sender=user2_id,
+                membership_event_id=state_map[(EventTypes.Member, user2_id)].event_id,
+                membership=Membership.JOIN,
+                event_stream_ordering=state_map[
+                    (EventTypes.Member, user2_id)
+                ].internal_metadata.stream_ordering,
+                has_known_state=True,
+                room_type=None,
+                room_name=None,
+                is_encrypted=False,
+                tombstone_successor_room_id=None,
+            ),
+        )
+
+
+class SlidingSyncTablesCatchUpBackgroundUpdatesTestCase(SlidingSyncTablesTestCaseBase):
+    """
+    Test the background updates for catch-up after Synapse downgrade to populate the
+    `sliding_sync_joined_rooms` and `sliding_sync_membership_snapshots` tables.
+
+    This is to test the "catch-up" version of the background update vs the "normal"
+    background update to populate the tables with all of the historical data. Both
+    versions share the same background update but just serve different purposes. We
+    check if the "catch-up" version needs to run on start-up based on whether there have
+    been any changes to rooms that aren't reflected in the sliding sync tables.
+
+    FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the
+    foreground update for
+    `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by
+    https://github.com/element-hq/synapse/issues/17623)
+    """
+
+    def test_joined_background_update_catch_up_new_room(self) -> None:
+        """
+        Test that rooms created while Synapse is downgraded (making
+        `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate a new room being created while
+        # Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                desc="simulate new room while Synapse was downgraded",
+            )
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+                _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+            )
+        )
+
+        # We shouldn't see any new data yet
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+
+    def test_joined_background_update_catch_up_room_state_change(self) -> None:
+        """
+        Test that new events sent while Synapse is downgraded (making
+        `sliding_sync_joined_rooms` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Get a snapshot of the `sliding_sync_joined_rooms` table before we add some state
+        sliding_sync_joined_rooms_results_before_state = (
+            self._get_sliding_sync_joined_rooms()
+        )
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results_before_state.keys()),
+            {room_id},
+            exact=True,
+        )
+
+        # Add a room name
+        self.helper.send_state(
+            room_id,
+            EventTypes.Name,
+            {"name": "my super duper room"},
+            tok=user1_tok,
+        )
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room name
+        # never made it into the table. This is to simulate the room name event
+        # being sent while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_update(
+                table="sliding_sync_joined_rooms",
+                keyvalues={"room_id": room_id},
+                updatevalues={
+                    # Clear the room name
+                    "room_name": None,
+                    # Reset the `event_stream_ordering` back to the value before the room name
+                    "event_stream_ordering": sliding_sync_joined_rooms_results_before_state[
+                        room_id
+                    ].event_stream_ordering,
+                },
+                desc="simulate new events while Synapse was downgraded",
+            )
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_joined_rooms_table",
+                _resolve_stale_data_in_sliding_sync_joined_rooms_table,
+            )
+        )
+
+        # Ensure that the stale data is deleted from the table
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            set(),
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms()
+        self.assertIncludes(
+            set(sliding_sync_joined_rooms_results.keys()),
+            {room_id},
+            exact=True,
+        )
+
+    def test_joined_background_update_catch_up_no_rooms(self) -> None:
+        """
+        Test that if you start your homeserver with no rooms on a Synapse version that
+        supports the sliding sync tables and the historical background update completes
+        (because no rooms to process), then Synapse is downgraded and new rooms are
+        created/joined; when Synapse is upgraded, the rooms will be processed when the
+        catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate the room being
+        # created while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Clean-up the `sliding_sync_joined_rooms` table as if the room never made
+        # it into the table. This is to simulate the room being created while Synapse
+        # was downgraded.
+ self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_joined_rooms", + column="room_id", + iterable=(room_id,), + keyvalues={}, + desc="simulate room being created while Synapse was downgraded", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + # The function under test. It should clear out stale data and start the + # background update to catch-up on the missing data. + self.get_success( + self.store.db_pool.runInteraction( + "_resolve_stale_data_in_sliding_sync_joined_rooms_table", + _resolve_stale_data_in_sliding_sync_joined_rooms_table, + ) + ) + + # We still shouldn't find any data yet + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + set(), + exact=True, + ) + + # Wait for the catch-up background update to finish + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Ensure that the table is populated correctly after the catch-up background + # update finishes + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + + def test_membership_snapshots_background_update_catch_up_new_membership( + self, + ) -> None: + """ + Test that completely new membership while Synapse is downgraded (making + `sliding_sync_membership_snapshots` stale) will be caught when Synapse is + upgraded and the catch-up routine is run. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Instead of testing with various levels of room state that should appear in the + # table, we're only using one room to keep this test simple. Because the + # underlying background update to populate these tables is the same as this + # catch-up routine, we are going to rely on + # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic. + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # User2 joins the room + self.helper.join(room_id, user2_id, tok=user2_tok) + + # Both users are joined to the room + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + + # Make sure all of the background updates have finished before we start the + # catch-up. Even though it should work fine if the other background update is + # still running, we want to see the catch-up routine restore the progress + # correctly. + # + # We also don't want the normal background update messing with our results so we + # run this before we do our manual database clean-up to simulate new events + # being sent while Synapse was downgraded. + self.wait_for_background_updates() + + # Clean-up the `sliding_sync_membership_snapshots` table as if the user2 + # membership never made it into the table. This is to simulate a membership + # change while Synapse was downgraded. 
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user2_id},
+                desc="simulate new membership while Synapse was downgraded",
+            )
+        )
+
+        # We shouldn't find the user2 membership in the table because we just deleted it
+        # in preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # We still shouldn't see the user2 membership yet (the catch-up background
+        # update hasn't run)
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_membership_change(
+        self,
+    ) -> None:
+        """
+        Test that membership changes while Synapse is downgraded (making
+        `sliding_sync_membership_snapshots` stale) will be caught when Synapse is upgraded and
+        the catch-up routine is run.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        # Instead of testing with various levels of room state that should appear in the
+        # table, we're only using one room to keep this test simple. Because the
+        # underlying background update to populate these tables is the same as this
+        # catch-up routine, we are going to rely on
+        # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic.
+        room_id = self.helper.create_room_as(user1_id, tok=user1_tok)
+        # User2 joins the room
+        self.helper.join(room_id, user2_id, tok=user2_tok)
+
+        # Both users are joined to the room
+        sliding_sync_membership_snapshots_results_before_membership_changes = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(
+                sliding_sync_membership_snapshots_results_before_membership_changes.keys()
+            ),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+        # User2 leaves the room
+        self.helper.leave(room_id, user2_id, tok=user2_tok)
+
+        # Make sure all of the background updates have finished before we start the
+        # catch-up. Even though it should work fine if the other background update is
+        # still running, we want to see the catch-up routine restore the progress
+        # correctly.
+        #
+        # We also don't want the normal background update messing with our results so we
+        # run this before we do our manual database clean-up to simulate new events
+        # being sent while Synapse was downgraded.
+        self.wait_for_background_updates()
+
+        # Rollback the `sliding_sync_membership_snapshots` table as if the user2
+        # membership change never made it into the table. This is to simulate a
+        # membership change while Synapse was downgraded.
+        self.get_success(
+            self.store.db_pool.simple_update(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user2_id},
+                updatevalues={
+                    # Reset everything back to the value before user2 left the room
+                    "membership": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].membership,
+                    "membership_event_id": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].membership_event_id,
+                    "event_stream_ordering": sliding_sync_membership_snapshots_results_before_membership_changes[
+                        (room_id, user2_id)
+                    ].event_stream_ordering,
+                },
+                desc="simulate membership change while Synapse was downgraded",
+            )
+        )
+
+        # We should see user2 still joined to the room because we made that change in
+        # preparation for the test.
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user1_id)),
+            sliding_sync_membership_snapshots_results_before_membership_changes[
+                (room_id, user1_id)
+            ],
+        )
+        self.assertEqual(
+            sliding_sync_membership_snapshots_results.get((room_id, user2_id)),
+            sliding_sync_membership_snapshots_results_before_membership_changes[
+                (room_id, user2_id)
+            ],
+        )
+
+        # The function under test. It should clear out stale data and start the
+        # background update to catch-up on the missing data.
+        self.get_success(
+            self.store.db_pool.runInteraction(
+                "_resolve_stale_data_in_sliding_sync_membership_snapshots_table",
+                _resolve_stale_data_in_sliding_sync_membership_snapshots_table,
+            )
+        )
+
+        # Ensure that the stale data is deleted from the table
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+            },
+            exact=True,
+        )
+
+        # Wait for the catch-up background update to finish
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Ensure that the table is populated correctly after the catch-up background
+        # update finishes
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+
+    def test_membership_snapshots_background_update_catch_up_no_membership(
+        self,
+    ) -> None:
+        """
+        Test that if you start your homeserver with no rooms on a Synapse version that
+        supports the sliding sync tables and the historical background update completes
+        (because no rooms/membership to process), then Synapse is downgraded and new
+        rooms are created/joined; when Synapse is upgraded, the rooms will be processed
+        when the catch-up routine is run.
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Instead of testing with various levels of room state that should appear in the + # table, we're only using one room to keep this test simple. Because the + # underlying background update to populate these tables is the same as this + # catch-up routine, we are going to rely on + # `SlidingSyncTablesBackgroundUpdatesTestCase` to cover that logic. + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + # User2 joins the room + self.helper.join(room_id, user2_id, tok=user2_tok) + + # Make sure all of the background updates have finished before we start the + # catch-up. Even though it should work fine if the other background update is + # still running, we want to see the catch-up routine restore the progress + # correctly. + # + # We also don't want the normal background update messing with our results so we + # run this before we do our manual database clean-up to simulate new events + # being sent while Synapse was downgraded. + self.wait_for_background_updates() + + # Rollback the `sliding_sync_membership_snapshots` table as if the user2 + # membership never made it into the table. This is to simulate a membership + # change while Synapse was downgraded. + self.get_success( + self.store.db_pool.simple_delete_many( + table="sliding_sync_membership_snapshots", + column="room_id", + iterable=(room_id,), + keyvalues={}, + desc="simulate room being created while Synapse was downgraded", + ) + ) + + # We shouldn't find anything in the table because we just deleted them in + # preparation for the test. + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # The function under test. It should clear out stale data and start the + # background update to catch-up on the missing data. + self.get_success( + self.store.db_pool.runInteraction( + "_resolve_stale_data_in_sliding_sync_membership_snapshots_table", + _resolve_stale_data_in_sliding_sync_membership_snapshots_table, + ) + ) + + # We still shouldn't find any data yet + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + set(), + exact=True, + ) + + # Wait for the catch-up background update to finish + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Ensure that the table is populated correctly after the catch-up background + # update finishes + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + + +class SlidingSyncMembershipSnapshotsTableFixForgottenColumnBackgroundUpdatesTestCase( + SlidingSyncTablesTestCaseBase +): + """ + Test the background updates that fixes `sliding_sync_membership_snapshots` -> + `forgotten` column. + """ + + def test_membership_snapshots_fix_forgotten_column_background_update(self) -> None: + """ + Test that the background update, updates the `sliding_sync_membership_snapshots` + -> `forgotten` column to be in sync with the `room_memberships` table. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) + # User1 joins the room + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Leave and forget the room + self.helper.leave(room_id, user1_id, tok=user1_tok) + # User1 forgets the room + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{room_id}/forget", + content={}, + access_token=user1_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + # Re-join the room + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Reset `sliding_sync_membership_snapshots` table as if the `forgotten` column + # got out of sync from the `room_memberships` table from the previous flawed + # code. + self.get_success( + self.store.db_pool.simple_update_one( + table="sliding_sync_membership_snapshots", + keyvalues={"room_id": room_id, "user_id": user1_id}, + updatevalues={"forgotten": 1}, + desc="sliding_sync_membership_snapshots.test_membership_snapshots_fix_forgotten_column_background_update", + ) + ) + + # Insert and run the background update. + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + { + "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE, + "progress_json": "{}", + }, + ) + ) + self.store.db_pool.updates._all_done = False + self.wait_for_background_updates() + + # Make sure the table is populated + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + (room_id, user2_id), + }, + exact=True, + ) + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id) + ) + # Holds the info according to the current state when the user joined. + # + # We only care about checking on user1 as that's what we reset and expect to be + # correct now + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + # We should see the room as no longer forgotten + forgotten=False, + ), + ) diff --git a/tests/storage/test_state_deletion.py b/tests/storage/test_state_deletion.py new file mode 100644
index 0000000000..a4d318ae20 --- /dev/null +++ b/tests/storage/test_state_deletion.py
@@ -0,0 +1,475 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2025 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+
+import logging
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.rest import admin
+from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.test_utils.event_injection import create_event
+from tests.unittest import HomeserverTestCase
+
+logger = logging.getLogger(__name__)
+
+
+class StateDeletionStoreTestCase(HomeserverTestCase):
+    """Tests for the StateDeletionStore."""
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.state_store = hs.get_datastores().state
+        self.state_deletion_store = hs.get_datastores().state_deletion
+        self.purge_events = hs.get_storage_controllers().purge_events
+
+        # We want to disable the automatic deletion of state groups in the
+        # background, so we can do controlled tests.
+        self.purge_events._delete_state_loop_call.stop()
+
+        self.user_id = self.register_user("test", "password")
+        tok = self.login("test", "password")
+        self.room_id = self.helper.create_room_as(self.user_id, tok=tok)
+
+    def check_if_can_be_deleted(self, state_group: int) -> bool:
+        """Check whether the given state group is ready to be deleted."""
+
+        state_group_to_sequence_number = self.get_success(
+            self.state_deletion_store.get_pending_deletions([state_group])
+        )
+
+        can_be_deleted = self.get_success(
+            self.state_deletion_store.db_pool.runInteraction(
+                "test_existing_pending_deletion_is_cleared",
+                self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+                state_group_to_sequence_number,
+            )
+        )
+
+        return state_group in can_be_deleted
+
+    def test_no_deletion(self) -> None:
+        """Test that calling persisting_state_group_references is fine if
+        nothing is pending deletion"""
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                sender=self.user_id,
+            )
+        )
+
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+
+        self.get_success(ctx_mgr.__aenter__())
+        self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+    def test_no_deletion_error(self) -> None:
+        """Test that calling persisting_state_group_references is fine if
+        nothing is pending deletion, but an error occurs."""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                sender=self.user_id,
+            )
+        )
+
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+
+        self.get_success(ctx_mgr.__aenter__())
+        self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
+
+    def test_existing_pending_deletion_is_cleared(self) -> None:
+        """Test that the pending deletion flag gets cleared when the state group
+        gets persisted."""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        # Mark a state group that we're referencing as pending deletion.
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group]
+            )
+        )
+
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+
+        self.get_success(ctx_mgr.__aenter__())
+        self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+        # The pending deletion flag should be cleared
+        pending_deletion = self.get_success(
+            self.state_deletion_store.db_pool.simple_select_one_onecol(
+                table="state_groups_pending_deletion",
+                keyvalues={"state_group": context.state_group},
+                retcol="1",
+                allow_none=True,
+                desc="test_existing_pending_deletion_is_cleared",
+            )
+        )
+        self.assertIsNone(pending_deletion)
+
+    def test_pending_deletion_is_cleared_during_persist(self) -> None:
+        """Test that the pending deletion flag is cleared when a state group
+        gets marked for deletion during persistence"""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+        self.get_success(ctx_mgr.__aenter__())
+
+        # Mark the state group that we're referencing as pending deletion,
+        # *after* we have started persisting.
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group]
+            )
+        )
+
+        self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+        # The pending deletion flag should be cleared
+        pending_deletion = self.get_success(
+            self.state_deletion_store.db_pool.simple_select_one_onecol(
+                table="state_groups_pending_deletion",
+                keyvalues={"state_group": context.state_group},
+                retcol="1",
+                allow_none=True,
+                desc="test_existing_pending_deletion_is_cleared",
+            )
+        )
+        self.assertIsNone(pending_deletion)
+
+    def test_deletion_check(self) -> None:
+        """Test that the `get_state_groups_ready_for_potential_deletion_txn` check
+        is correct during different points of the lifecycle of persisting an
+        event."""
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group]
+            )
+        )
+
+        # We shouldn't be able to delete the state group as not enough time has passed
+        can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+        self.assertFalse(can_be_deleted)
+
+        # After enough time we can delete the state group
+        self.reactor.advance(
+            1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+        )
+        can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+        self.assertTrue(can_be_deleted)
+
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+        self.get_success(ctx_mgr.__aenter__())
+
+        # But once we start persisting we can't delete the state group
+        can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+        self.assertFalse(can_be_deleted)
+
+        self.get_success(ctx_mgr.__aexit__(None, None, None))
+
+        # The pending deletion flag should remain cleared after persistence has
+        # finished.
+        can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+        self.assertFalse(can_be_deleted)
+
+    def test_deletion_error_during_persistence(self) -> None:
+        """Test that state groups remain marked as pending deletion if persisting
+        the event fails."""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        # Mark a state group that we're referencing as pending deletion.
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group]
+            )
+        )
+
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+
+        self.get_success(ctx_mgr.__aenter__())
+        self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
+
+        # We should be able to delete the state group after a certain amount of
+        # time
+        self.reactor.advance(
+            1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+        )
+        can_be_deleted = self.check_if_can_be_deleted(context.state_group)
+        self.assertTrue(can_be_deleted)
+
+    def test_race_between_check_and_insert(self) -> None:
+        """Check that we correctly handle the race where we go to delete a
+        state group, check that it is unreferenced, and then it becomes
+        referenced just before we delete it."""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        # Mark a state group that we're referencing as pending deletion.
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group]
+            )
+        )
+
+        # Advance time enough so we can delete the state group
+        self.reactor.advance(
+            1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+        )
+
+        # Check that we'd be able to delete this state group.
+        state_group_to_sequence_number = self.get_success(
+            self.state_deletion_store.get_pending_deletions([context.state_group])
+        )
+
+        can_be_deleted = self.get_success(
+            self.state_deletion_store.db_pool.runInteraction(
+                "test_existing_pending_deletion_is_cleared",
+                self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+                state_group_to_sequence_number,
+            )
+        )
+        self.assertCountEqual(can_be_deleted, [context.state_group])
+
+        # ... in the real world we'd check that the state group isn't referenced here ...
+
+        # Now we persist the event to reference the state group, *after* we
+        # check that the state group wasn't referenced
+        ctx_mgr = self.state_deletion_store.persisting_state_group_references(
+            [(event, context)]
+        )
+
+        self.get_success(ctx_mgr.__aenter__())
+        self.get_success(ctx_mgr.__aexit__(Exception, Exception("test"), None))
+
+        # We simulate a pause (required to hit the race)
+        self.reactor.advance(
+            1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+        )
+
+        # We should no longer be able to delete the state group, without having
+        # to recheck if it's referenced.
+        can_be_deleted = self.get_success(
+            self.state_deletion_store.db_pool.runInteraction(
+                "test_existing_pending_deletion_is_cleared",
+                self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+                state_group_to_sequence_number,
+            )
+        )
+        self.assertCountEqual(can_be_deleted, [])
+
+    def test_remove_ancestors_from_can_delete(self) -> None:
+        """Test that if a state group is not ready to be deleted, we also don't
+        delete anything that is referenced by it"""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        # Create a new state group that references the one from the event
+        new_state_group = self.get_success(
+            self.state_store.store_state_group(
+                event.event_id,
+                event.room_id,
+                prev_group=context.state_group,
+                delta_ids={},
+                current_state_ids=None,
+            )
+        )
+
+        # Mark them both as pending deletion
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group, new_state_group]
+            )
+        )
+
+        # Advance time enough that both state groups are ready for deletion.
+        self.reactor.advance(
+            1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+        )
+
+        # We can now delete both state groups
+        self.assertTrue(self.check_if_can_be_deleted(context.state_group))
+        self.assertTrue(self.check_if_can_be_deleted(new_state_group))
+
+        # Use the new_state_group to bump its deletion time
+        self.get_success(
+            self.state_store.store_state_group(
+                event.event_id,
+                event.room_id,
+                prev_group=new_state_group,
+                delta_ids={},
+                current_state_ids=None,
+            )
+        )
+
+        # We should now not be able to delete either of the state groups.
+        state_group_to_sequence_number = self.get_success(
+            self.state_deletion_store.get_pending_deletions(
+                [context.state_group, new_state_group]
+            )
+        )
+
+        # Neither state group should be deletable: referencing `new_state_group`
+        # bumped its deletion time, and its ancestors are excluded with it.
+        can_be_deleted = self.get_success(
+            self.state_deletion_store.db_pool.runInteraction(
+                "test_existing_pending_deletion_is_cleared",
+                self.state_deletion_store.get_state_groups_ready_for_potential_deletion_txn,
+                state_group_to_sequence_number,
+            )
+        )
+        self.assertCountEqual(can_be_deleted, [])
+
+    def test_newly_referenced_state_group_gets_removed_from_pending(self) -> None:
+        """Check that if a state group marked for deletion becomes referenced
+        (without being removed from the pending deletion table), it gets removed
+        from the pending deletion table."""
+
+        event, context = self.get_success(
+            create_event(
+                self.hs,
+                room_id=self.room_id,
+                type="m.test",
+                state_key="",
+                sender=self.user_id,
+            )
+        )
+        assert context.state_group is not None
+
+        # Mark a state group that we're referencing as pending deletion.
+        self.get_success(
+            self.state_deletion_store.mark_state_groups_as_pending_deletion(
+                [context.state_group]
+            )
+        )
+
+        # Advance time enough that the state group is ready for deletion.
+        self.reactor.advance(
+            1 + self.state_deletion_store.DELAY_BEFORE_DELETION_MS / 1000
+        )
+
+        # Manually insert into the table to mimic the state group getting used.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="event_to_state_groups",
+                values={"state_group": context.state_group, "event_id": event.event_id},
+                desc="test_newly_referenced_state_group_gets_removed_from_pending",
+            )
+        )
+
+        # Manually run the background task to delete pending state groups.
+        self.get_success(self.purge_events._delete_state_groups_loop())
+
+        # The pending deletion flag should be cleared...
+        pending_deletion = self.get_success(
+            self.state_deletion_store.db_pool.simple_select_one_onecol(
+                table="state_groups_pending_deletion",
+                keyvalues={"state_group": context.state_group},
+                retcol="1",
+                allow_none=True,
+                desc="test_newly_referenced_state_group_gets_removed_from_pending",
+            )
+        )
+        self.assertIsNone(pending_deletion)
+
+        # ... but the state should not have been deleted.
+        state = self.get_success(
+            self.state_store._get_state_for_groups([context.state_group])
+        )
+        self.assertGreater(len(state[context.state_group]), 0)
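The tests above drive `persisting_state_group_references` by calling `__aenter__` and `__aexit__` explicitly (wrapped in `get_success`) rather than using `async with`, so that store calls can be interleaved between entry and exit. A minimal, self-contained sketch of that pattern with a hypothetical async context manager (the names below are illustrative, not Synapse API):

```py
import asyncio
from contextlib import asynccontextmanager
from typing import AsyncIterator, List


@asynccontextmanager
async def persisting_refs(refs: List[int]) -> AsyncIterator[None]:
    # Hypothetical stand-in: "protect" the referenced state groups on entry
    # and release them on exit, even if the block raised.
    print("persist started, protecting", refs)
    try:
        yield
    finally:
        print("persist finished, releasing", refs)


async def main() -> None:
    # Equivalent to the tests' explicit __aenter__/__aexit__ calls, which let
    # a test poke at the store *between* the two steps:
    ctx = persisting_refs([42])
    await ctx.__aenter__()
    try:
        pass  # ... mark groups as pending deletion, run checks, etc. ...
    finally:
        await ctx.__aexit__(None, None, None)


asyncio.run(main())
```
diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py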
index 7b7590da76..0f58dc8a0a 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py
@@ -27,7 +27,13 @@ from immutabledict import immutabledict from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes +from synapse.api.constants import ( + Direction, + EventTypes, + JoinRules, + Membership, + RelationTypes, +) from synapse.api.filtering import Filter from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import FrozenEventV3 @@ -147,7 +153,7 @@ class PaginationTestCase(HomeserverTestCase): def _filter_messages(self, filter: JsonDict) -> List[str]: """Make a request to /messages with a filter, returns the chunk of events.""" - events, next_key = self.get_success( + events, next_key, _ = self.get_success( self.hs.get_datastores().main.paginate_room_events_by_topological_ordering( room_id=self.room_id, from_key=self.from_token.room_key, @@ -1154,7 +1160,7 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase): room_id=room_id1, event_id=None, event_pos=dummy_state_pos, - membership="leave", + membership=Membership.LEAVE, sender=None, # user1_id, prev_event_id=join_response1["event_id"], prev_event_pos=join_pos1, @@ -1164,6 +1170,75 @@ class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase): ], ) + def test_state_reset2(self) -> None: + """ + Test a state reset scenario where the user gets removed from the room (when + there is no corresponding leave event) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, is_public=True, tok=user2_tok) + + event_response = self.helper.send(room_id1, "test", tok=user2_tok) + event_id = event_response["event_id"] + + user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) + user1_join_pos = self.get_success( + self.store.get_position_for_event(user1_join_response["event_id"]) + ) + + before_reset_token = self.event_sources.get_current_token() + + # Trigger a state reset + join_rule_event, join_rule_context = self.get_success( + create_event( + self.hs, + prev_event_ids=[event_id], + type=EventTypes.JoinRules, + state_key="", + content={"join_rule": JoinRules.INVITE}, + sender=user2_id, + room_id=room_id1, + room_version=self.get_success(self.store.get_room_version_id(room_id1)), + ) + ) + _, join_rule_event_pos, _ = self.get_success( + self.persistence.persist_event(join_rule_event, join_rule_context) + ) + + after_reset_token = self.event_sources.get_current_token() + + membership_changes = self.get_success( + self.store.get_current_state_delta_membership_changes_for_user( + user1_id, + from_key=before_reset_token.room_key, + to_key=after_reset_token.room_key, + ) + ) + + # Let the whole diff show on failure + self.maxDiff = None + self.assertEqual( + membership_changes, + [ + CurrentStateDeltaMembership( + room_id=room_id1, + event_id=None, + # The position where the state reset happened + event_pos=join_rule_event_pos, + membership=Membership.LEAVE, + sender=None, + prev_event_id=user1_join_response["event_id"], + prev_event_pos=user1_join_pos, + prev_membership="join", + prev_sender=user1_id, + ), + ], + ) + def test_excluded_room_ids(self) -> None: """ Test that the `excluded_room_ids` option excludes changes from the specified rooms. 
@@ -1384,20 +1459,25 @@ class GetCurrentStateDeltaMembershipChangesForUserFederationTestCase( ) ) - with patch.object( - self.room_member_handler.federation_handler.federation_client, - "make_membership_event", - mock_make_membership_event, - ), patch.object( - self.room_member_handler.federation_handler.federation_client, - "send_join", - mock_send_join, - ), patch( - "synapse.event_auth._is_membership_change_allowed", - return_value=None, - ), patch( - "synapse.handlers.federation_event.check_state_dependent_auth_rules", - return_value=None, + with ( + patch.object( + self.room_member_handler.federation_handler.federation_client, + "make_membership_event", + mock_make_membership_event, + ), + patch.object( + self.room_member_handler.federation_handler.federation_client, + "send_join", + mock_send_join, + ), + patch( + "synapse.event_auth._is_membership_change_allowed", + return_value=None, + ), + patch( + "synapse.handlers.federation_event.check_state_dependent_auth_rules", + return_value=None, + ), ): self.get_success( self.room_member_handler.update_membership( diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index 6d1ae4c8d7..f12402f5f2 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py
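The hunk below restyles an `assertRaises` call that originally ended in a stray trailing comma, which turned the whole statement into a one-element tuple. The context-manager form of `assertRaises` avoids that class of mistake entirely; a tiny runnable sketch (generic exception here, not the `AuthError` from the hunk):

```py
import unittest


class TrailingCommaExample(unittest.TestCase):
    def test_context_manager_form(self) -> None:
        # The failing call sits inside the with-block, so a stray trailing
        # comma elsewhere cannot silently neuter the assertion.
        with self.assertRaises(ZeroDivisionError):
            1 / 0


if __name__ == "__main__":
    unittest.main()
```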
@@ -292,12 +292,12 @@ class EventAuthTestCase(unittest.TestCase):
         ]
 
         # pleb should not be able to send state
-        self.assertRaises(
-            AuthError,
-            event_auth.check_state_dependent_auth_rules,
-            _random_state_event(RoomVersions.V1, pleb),
-            auth_events,
-        ),
+        self.assertRaises(
+            AuthError,
+            event_auth.check_state_dependent_auth_rules,
+            _random_state_event(RoomVersions.V1, pleb),
+            auth_events,
+        )
 
         # king should be able to send state
         event_auth.check_state_dependent_auth_rules(
diff --git a/tests/test_federation.py b/tests/test_federation.py
deleted file mode 100644
index 4e9adc0625..0000000000 --- a/tests/test_federation.py +++ /dev/null
@@ -1,376 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2020 The Matrix.org Foundation C.I.C. -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from typing import Collection, List, Optional, Union -from unittest.mock import AsyncMock, Mock - -from twisted.test.proto_helpers import MemoryReactor - -from synapse.api.errors import FederationError -from synapse.api.room_versions import RoomVersion, RoomVersions -from synapse.events import EventBase, make_event_from_dict -from synapse.events.snapshot import EventContext -from synapse.federation.federation_base import event_from_pdu_json -from synapse.handlers.device import DeviceListUpdater -from synapse.http.types import QueryParams -from synapse.logging.context import LoggingContext -from synapse.server import HomeServer -from synapse.types import JsonDict, UserID, create_requester -from synapse.util import Clock -from synapse.util.retryutils import NotRetryingDestination - -from tests import unittest - - -class MessageAcceptTests(unittest.HomeserverTestCase): - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.http_client = Mock() - return self.setup_test_homeserver(federation_http_client=self.http_client) - - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - user_id = UserID("us", "test") - our_user = create_requester(user_id) - room_creator = self.hs.get_room_creation_handler() - self.room_id = self.get_success( - room_creator.create_room( - our_user, room_creator._presets_dict["public_chat"], ratelimit=False - ) - )[0] - - self.store = self.hs.get_datastores().main - - # Figure out what the most recent event is - most_recent = next( - iter( - self.get_success( - self.hs.get_datastores().main.get_latest_event_ids_in_room( - self.room_id - ) - ) - ) - ) - - join_event = make_event_from_dict( - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "state_key": "@baduser:test.serv", - "event_id": "$join:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.member", - "origin": "test.servx", - "content": {"membership": "join"}, - "auth_events": [], - "prev_state": [(most_recent, {})], - "prev_events": [(most_recent, {})], - } - ) - - self.handler = self.hs.get_federation_handler() - federation_event_handler = self.hs.get_federation_event_handler() - - async def _check_event_auth( - origin: Optional[str], event: EventBase, context: EventContext - ) -> None: - pass - - federation_event_handler._check_event_auth = _check_event_auth # type: ignore[method-assign] - self.client = self.hs.get_federation_client() - - async def _check_sigs_and_hash_for_pulled_events_and_fetch( - dest: str, pdus: Collection[EventBase], room_version: RoomVersion - ) -> List[EventBase]: - return list(pdus) - - self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] - - # Send the join, 
it should return None (which is not an error) - self.assertEqual( - self.get_success( - federation_event_handler.on_receive_pdu("test.serv", join_event) - ), - None, - ) - - # Make sure we actually joined the room - self.assertEqual( - self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)), - {"$join:test.serv"}, - ) - - def test_cant_hide_direct_ancestors(self) -> None: - """ - If you send a message, you must be able to provide the direct - prev_events that said event references. - """ - - async def post_json( - destination: str, - path: str, - data: Optional[JsonDict] = None, - long_retries: bool = False, - timeout: Optional[int] = None, - ignore_backoff: bool = False, - args: Optional[QueryParams] = None, - ) -> Union[JsonDict, list]: - # If it asks us for new missing events, give them NOTHING - if path.startswith("/_matrix/federation/v1/get_missing_events/"): - return {"events": []} - return {} - - self.http_client.post_json = post_json - - # Figure out what the most recent event is - most_recent = next( - iter( - self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) - ) - ) - - # Now lie about an event - lying_event = make_event_from_dict( - { - "room_id": self.room_id, - "sender": "@baduser:test.serv", - "event_id": "one:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.message", - "origin": "test.serv", - "content": {"body": "hewwo?"}, - "auth_events": [], - "prev_events": [("two:test.serv", {}), (most_recent, {})], - } - ) - - federation_event_handler = self.hs.get_federation_event_handler() - with LoggingContext("test-context"): - failure = self.get_failure( - federation_event_handler.on_receive_pdu("test.serv", lying_event), - FederationError, - ) - - # on_receive_pdu should throw an error - self.assertEqual( - failure.value.args[0], - ( - "ERROR 403: Your server isn't divulging details about prev_events " - "referenced in this event." - ), - ) - - # Make sure the invalid event isn't there - extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) - self.assertEqual(extrem, {"$join:test.serv"}) - - def test_retry_device_list_resync(self) -> None: - """Tests that device lists are marked as stale if they couldn't be synced, and - that stale device lists are retried periodically. - """ - remote_user_id = "@john:test_remote" - remote_origin = "test_remote" - - # Track the number of attempts to resync the user's device list. - self.resync_attempts = 0 - - # When this function is called, increment the number of resync attempts (only if - # we're querying devices for the right user ID), then raise a - # NotRetryingDestination error to fail the resync gracefully. - def query_user_devices( - destination: str, user_id: str, timeout: int = 30000 - ) -> JsonDict: - if user_id == remote_user_id: - self.resync_attempts += 1 - - raise NotRetryingDestination(0, 0, destination) - - # Register the mock on the federation client. - federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[method-assign] - - # Register a mock on the store so that the incoming update doesn't fail because - # we don't share a room with the user. - store = self.hs.get_datastores().main - store.get_rooms_for_user = AsyncMock(return_value=["!someroom:test"]) - - # Manually inject a fake device list update. We need this update to include at - # least one prev_id so that the user's device list will need to be retried. 
- device_list_updater = self.hs.get_device_handler().device_list_updater - assert isinstance(device_list_updater, DeviceListUpdater) - self.get_success( - device_list_updater.incoming_device_list_update( - origin=remote_origin, - edu_content={ - "deleted": False, - "device_display_name": "Mobile", - "device_id": "QBUAZIFURK", - "prev_id": [5], - "stream_id": 6, - "user_id": remote_user_id, - }, - ) - ) - - # Check that there was one resync attempt. - self.assertEqual(self.resync_attempts, 1) - - # Check that the resync attempt failed and caused the user's device list to be - # marked as stale. - need_resync = self.get_success( - store.get_user_ids_requiring_device_list_resync() - ) - self.assertIn(remote_user_id, need_resync) - - # Check that waiting for 30 seconds caused Synapse to retry resyncing the device - # list. - self.reactor.advance(30) - self.assertEqual(self.resync_attempts, 2) - - def test_cross_signing_keys_retry(self) -> None: - """Tests that resyncing a device list correctly processes cross-signing keys from - the remote server. - """ - remote_user_id = "@john:test_remote" - remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" - remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - - # Register mock device list retrieval on the federation client. - federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = AsyncMock( # type: ignore[method-assign] - return_value={ - "user_id": remote_user_id, - "stream_id": 1, - "devices": [], - "master_key": { - "user_id": remote_user_id, - "usage": ["master"], - "keys": {"ed25519:" + remote_master_key: remote_master_key}, - }, - "self_signing_key": { - "user_id": remote_user_id, - "usage": ["self_signing"], - "keys": { - "ed25519:" + remote_self_signing_key: remote_self_signing_key - }, - }, - } - ) - - # Resync the device list. - device_handler = self.hs.get_device_handler() - self.get_success( - device_handler.device_list_updater.multi_user_device_resync( - [remote_user_id] - ), - ) - - # Retrieve the cross-signing keys for this user. - keys = self.get_success( - self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), - ) - self.assertIn(remote_user_id, keys) - key = keys[remote_user_id] - assert key is not None - - # Check that the master key is the one returned by the mock. - master_key = key["master"] - self.assertEqual(len(master_key["keys"]), 1) - self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) - self.assertTrue(remote_master_key in master_key["keys"].values()) - - # Check that the self-signing key is the one returned by the mock. 
- self_signing_key = key["self_signing"] - self.assertEqual(len(self_signing_key["keys"]), 1) - self.assertTrue( - "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), - ) - self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values()) - - -class StripUnsignedFromEventsTestCase(unittest.TestCase): - def test_strip_unauthorized_unsigned_values(self) -> None: - event1 = { - "sender": "@baduser:test.serv", - "state_key": "@baduser:test.serv", - "event_id": "$event1:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.member", - "origin": "test.servx", - "content": {"membership": "join"}, - "auth_events": [], - "unsigned": {"malicious garbage": "hackz", "more warez": "more hackz"}, - } - filtered_event = event_from_pdu_json(event1, RoomVersions.V1) - # Make sure unauthorized fields are stripped from unsigned - self.assertNotIn("more warez", filtered_event.unsigned) - - def test_strip_event_maintains_allowed_fields(self) -> None: - event2 = { - "sender": "@baduser:test.serv", - "state_key": "@baduser:test.serv", - "event_id": "$event2:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.member", - "origin": "test.servx", - "auth_events": [], - "content": {"membership": "join"}, - "unsigned": { - "malicious garbage": "hackz", - "more warez": "more hackz", - "age": 14, - "invite_room_state": [], - }, - } - - filtered_event2 = event_from_pdu_json(event2, RoomVersions.V1) - self.assertIn("age", filtered_event2.unsigned) - self.assertEqual(14, filtered_event2.unsigned["age"]) - self.assertNotIn("more warez", filtered_event2.unsigned) - # Invite_room_state is allowed in events of type m.room.member - self.assertIn("invite_room_state", filtered_event2.unsigned) - self.assertEqual([], filtered_event2.unsigned["invite_room_state"]) - - def test_strip_event_removes_fields_based_on_event_type(self) -> None: - event3 = { - "sender": "@baduser:test.serv", - "state_key": "@baduser:test.serv", - "event_id": "$event3:test.serv", - "depth": 1000, - "origin_server_ts": 1, - "type": "m.room.power_levels", - "origin": "test.servx", - "content": {}, - "auth_events": [], - "unsigned": { - "malicious garbage": "hackz", - "more warez": "more hackz", - "age": 14, - "invite_room_state": [], - }, - } - filtered_event3 = event_from_pdu_json(event3, RoomVersions.V1) - self.assertIn("age", filtered_event3.unsigned) - # Invite_room_state field is only permitted in event type m.room.member - self.assertNotIn("invite_room_state", filtered_event3.unsigned) - self.assertNotIn("more warez", filtered_event3.unsigned) diff --git a/tests/test_mau.py b/tests/test_mau.py
index 714854cdf2..820913dde4 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py
@@ -332,6 +332,9 @@ class TestMauLimit(unittest.HomeserverTestCase):
         )
 
         if channel.code != 200:
+            # raise Exception(
+            #     f"Failed to register user {localpart}: {channel.code} {channel.text_body}"
+            # )
             raise HttpResponseException(
                 channel.code, channel.result["reason"], channel.result["body"]
             ).to_synapse_error()
diff --git a/tests/test_server.py b/tests/test_server.py
index 9ff2589497..9cb6766b5f 100644 --- a/tests/test_server.py +++ b/tests/test_server.py
@@ -233,9 +233,7 @@ class OptionsResourceTests(unittest.TestCase): self.resource = OptionsResource() self.resource.putChild(b"res", DummyResource()) - def _make_request( - self, method: bytes, path: bytes, experimental_cors_msc3886: bool = False - ) -> FakeChannel: + def _make_request(self, method: bytes, path: bytes) -> FakeChannel: """Create a request from the method/path and return a channel with the response.""" # Create a site and query for the resource. site = SynapseSite( @@ -246,7 +244,6 @@ class OptionsResourceTests(unittest.TestCase): { "type": "http", "port": 0, - "experimental_cors_msc3886": experimental_cors_msc3886, }, ), self.resource, @@ -283,32 +280,6 @@ class OptionsResourceTests(unittest.TestCase): [b"Synapse-Trace-Id, Server"], ) - def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: - # Ensure the correct CORS headers have been added - # as per https://github.com/matrix-org/matrix-spec-proposals/blob/hughns/simple-rendezvous-capability/proposals/3886-simple-rendezvous-capability.md#cors - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Allow-Origin"), - [b"*"], - "has correct CORS Origin header", - ) - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Allow-Methods"), - [b"GET, HEAD, POST, PUT, DELETE, OPTIONS"], # HEAD isn't in the spec - "has correct CORS Methods header", - ) - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Allow-Headers"), - [ - b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match" - ], - "has correct CORS Headers header", - ) - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"), - [b"ETag, Location, X-Max-Bytes"], - "has correct CORS Expose Headers header", - ) - def test_unknown_options_request(self) -> None: """An OPTIONS requests to an unknown URL still returns 204 No Content.""" channel = self._make_request(b"OPTIONS", b"/foo/") @@ -325,16 +296,6 @@ class OptionsResourceTests(unittest.TestCase): self._check_cors_standard_headers(channel) - def test_known_options_request_msc3886(self) -> None: - """An OPTIONS requests to an known URL still returns 204 No Content.""" - channel = self._make_request( - b"OPTIONS", b"/res/", experimental_cors_msc3886=True - ) - self.assertEqual(channel.code, 204) - self.assertNotIn("body", channel.result) - - self._check_cors_msc3886_headers(channel) - def test_unknown_request(self) -> None: """A non-OPTIONS request to an unknown URL should 404.""" channel = self._make_request(b"GET", b"/foo/") diff --git a/tests/test_state.py b/tests/test_state.py
index 311a590693..adb72b0730 100644 --- a/tests/test_state.py +++ b/tests/test_state.py
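One hunk below replaces a dict comprehension with `dict.fromkeys`, which is equivalent (and marginally faster) when the fill value is immutable, as `False` is here. A quick runnable illustration, including the standard caveat for mutable values:

```py
event_ids = ["$a", "$b"]

# Equivalent when the value is immutable, e.g. False:
assert {e: False for e in event_ids} == dict.fromkeys(event_ids, False)

# General dict.fromkeys caveat: a mutable value is shared across keys.
shared = dict.fromkeys(event_ids, [])
shared["$a"].append(1)
assert shared["$b"] == [1]  # same list object!
```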
@@ -31,7 +31,7 @@ from typing import (
     Tuple,
     cast,
 )
-from unittest.mock import Mock
+from unittest.mock import AsyncMock, Mock
 
 from twisted.internet import defer
 
@@ -149,7 +149,7 @@ class _DummyStore:
     async def get_partial_state_events(
         self, event_ids: Collection[str]
     ) -> Dict[str, bool]:
-        return {e: False for e in event_ids}
+        return dict.fromkeys(event_ids, False)
 
     async def get_state_group_delta(
         self, name: str
@@ -221,7 +221,16 @@ class Graph:
 class StateTestCase(unittest.TestCase):
     def setUp(self) -> None:
         self.dummy_store = _DummyStore()
-        storage_controllers = Mock(main=self.dummy_store, state=self.dummy_store)
+
+        # Add a dummy deletion store that always reports that we have all the
+        # necessary state groups.
+        dummy_deletion_store = AsyncMock()
+        dummy_deletion_store.check_state_groups_and_bump_deletion.return_value = []
+
+        storage_controllers = Mock(
+            main=self.dummy_store,
+            state=self.dummy_store,
+        )
         hs = Mock(
             spec_set=[
                 "config",
@@ -241,7 +250,10 @@ class StateTestCase(unittest.TestCase):
         )
         clock = cast(Clock, MockClock())
         hs.config = default_config("tesths", True)
-        hs.get_datastores.return_value = Mock(main=self.dummy_store)
+        hs.get_datastores.return_value = Mock(
+            main=self.dummy_store,
+            state_deletion=dummy_deletion_store,
+        )
         hs.get_state_handler.return_value = None
         hs.get_clock.return_value = clock
         hs.get_macaroon_generator.return_value = MacaroonGenerator(
diff --git a/tests/test_types.py b/tests/test_types.py
index 00adc65a5a..0c08bc8ecc 100644 --- a/tests/test_types.py +++ b/tests/test_types.py
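The hunk below merely re-wraps a `class_name_func` lambda, but the hook itself is worth noting: `parameterized_class` calls it to name each generated test class after its parameters, so failures identify the token type directly. A runnable sketch with a hypothetical stand-in class:

```py
import unittest

from parameterized import parameterized_class


class DummyToken:
    """Hypothetical stand-in for a stream token type."""


@parameterized_class(
    ("token_type",),
    [(DummyToken,)],
    class_name_func=lambda cls, num, params_dict: (
        f"{cls.__name__}_{params_dict['token_type'].__name__}"
    ),
)
class TokenTestCase(unittest.TestCase):
    token_type: type

    def test_token_type_is_injected(self) -> None:
        # The generated class is named e.g. "TokenTestCase_DummyToken".
        self.assertIs(self.token_type, DummyToken)
```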
@@ -145,7 +145,9 @@ class MapUsernameTestCase(unittest.TestCase): (MultiWriterStreamToken,), (RoomStreamToken,), ], - class_name_func=lambda cls, num, params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}", + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{params_dict['token_type'].__name__}", ) class MultiWriterTokenTestCase(unittest.HomeserverTestCase): """Tests for the different types of multi writer tokens.""" diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index 4ab42a02b9..3e6fd03600 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py
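Among the additions below, `SMALL_PNG_SHA256` pins the SHA-256 hexdigest of the existing `SMALL_PNG` fixture so media tests can assert on stored hashes. Deriving such a constant is one line of `hashlib`; a runnable sketch with illustrative bytes (not the real fixture):

```py
import hashlib

fixture = b"\x89PNG\r\n\x1a\n"  # illustrative bytes, not the real SMALL_PNG

# 64 lowercase hex characters, suitable for pasting in as a test constant.
digest = hashlib.sha256(fixture).hexdigest()
print(digest)

# A test can then compare a stored/uploaded copy against the pinned value:
assert hashlib.sha256(fixture).hexdigest() == digest
```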
@@ -22,6 +22,8 @@ """ Utilities for running the unit tests """ + +import base64 import json import sys import warnings @@ -137,3 +139,23 @@ SMALL_PNG = unhexlify( b"0000001f15c4890000000a49444154789c63000100000500010d" b"0a2db40000000049454e44ae426082" ) +# The SHA256 hexdigest for the above bytes. +SMALL_PNG_SHA256 = "ebf4f635a17d10d6eb46ba680b70142419aa3220f228001a036d311a22ee9d2a" + +# A small CMYK-encoded JPEG image used in some tests. +# +# Generated with: +# img = PIL.Image.new('CMYK', (1, 1), (0, 0, 0, 0)) +# img.save('minimal_cmyk.jpg', 'JPEG') +# +# Resolution: 1x1, MIME type: image/jpeg, Extension: jpeg, Size: 4 KiB +SMALL_CMYK_JPEG = base64.b64decode(""" +/9j/7gAOQWRvYmUAZAAAAAAA/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCww +ZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/8 +AAFAgAAQABBEMRAE0RAFkRAEsRAP/EAB8AAAEFAQEBAQEBAAAAAAAAAAABA +gMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNR +YQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkN +ERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlp +eYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5 +ebn6Onq8fLz9PX29/j5+v/aAA4EQwBNAFkASwAAPwD3+vf69/r3+v/Z +""") diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py
index dd40c338d6..d58222a9f6 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py
@@ -48,7 +48,7 @@ def setup_logging() -> None: # We exclude `%(asctime)s` from this format because the Twisted logger adds its own # timestamp - log_format = "%(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s" + log_format = "%(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s" handler = ToTwistedHandler() formatter = logging.Formatter(log_format) diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py
index 6c4be1c1f8..5bf5e5cb0c 100644 --- a/tests/test_utils/oidc.py +++ b/tests/test_utils/oidc.py
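The hunk below makes the fake OIDC server emit an `at_hash` claim. Per OpenID Connect Core §3.1.3.6, `at_hash` is the base64url encoding (padding stripped) of the left-most half of the access token's hash, using the hash function of the ID token's signing algorithm; with SHA-256 that is the first 16 of 32 digest bytes, exactly as the hunk computes. A standalone sketch of the same computation:

```py
import base64
from hashlib import sha256


def compute_at_hash(access_token: str) -> str:
    # Left-most 128 bits of SHA-256(access_token), base64url without padding.
    digest = sha256(access_token.encode("ascii")).digest()
    return base64.urlsafe_b64encode(digest[:16]).rstrip(b"=").decode("ascii")


print(compute_at_hash("example-access-token"))
```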
@@ -20,7 +20,9 @@ # +import base64 import json +from hashlib import sha256 from typing import Any, ContextManager, Dict, List, Optional, Tuple from unittest.mock import Mock, patch from urllib.parse import parse_qs @@ -154,10 +156,23 @@ class FakeOidcServer: json_payload = json.dumps(payload) return jws.serialize_compact(protected, json_payload, self._key).decode("utf-8") - def generate_id_token(self, grant: FakeAuthorizationGrant) -> str: + def generate_id_token( + self, grant: FakeAuthorizationGrant, access_token: str + ) -> str: + # Generate a hash of the access token for the optional + # `at_hash` field in an ID Token. + # + # 3.1.3.6. ID Token, https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken + at_hash = ( + base64.urlsafe_b64encode(sha256(access_token.encode("ascii")).digest()[:16]) + .rstrip(b"=") + .decode("ascii") + ) + now = int(self._clock.time()) id_token = { **grant.userinfo, + "at_hash": at_hash, "iss": self.issuer, "aud": grant.client_id, "iat": now, @@ -243,7 +258,7 @@ class FakeOidcServer: } if "openid" in grant.scope: - token["id_token"] = self.generate_id_token(grant) + token["id_token"] = self.generate_id_token(grant, access_token) return dict(token) diff --git a/tests/unittest.py b/tests/unittest.py
index 4aa7f56106..24077d79d6 100644 --- a/tests/unittest.py +++ b/tests/unittest.py
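One hunk below loosens `register_appservice_user` to return `Tuple[str, Optional[str]]` and switches to `.get("device_id")`: an appservice registration response need not include a device, and `.get` yields `None` instead of raising `KeyError`. A tiny runnable illustration of the pattern:

```py
from typing import Optional, Tuple


def extract_ids(body: dict) -> Tuple[str, Optional[str]]:
    # "user_id" is required, so indexing (KeyError if absent) is appropriate;
    # "device_id" may legitimately be missing, so .get() returns None.
    return body["user_id"], body.get("device_id")


assert extract_ids({"user_id": "@as:test"}) == ("@as:test", None)
assert extract_ids({"user_id": "@as:test", "device_id": "D1"}) == ("@as:test", "D1")
```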
@@ -40,6 +40,7 @@ from typing import ( Mapping, NoReturn, Optional, + Protocol, Tuple, Type, TypeVar, @@ -50,7 +51,7 @@ from unittest.mock import Mock, patch import canonicaljson import signedjson.key import unpaddedbase64 -from typing_extensions import Concatenate, ParamSpec, Protocol +from typing_extensions import Concatenate, ParamSpec from twisted.internet.defer import Deferred, ensureDeferred from twisted.python.failure import Failure @@ -272,8 +273,8 @@ class TestCase(unittest.TestCase): def assertIncludes( self, - actual_items: AbstractSet[str], - expected_items: AbstractSet[str], + actual_items: AbstractSet[TV], + expected_items: AbstractSet[TV], exact: bool = False, message: Optional[str] = None, ) -> None: @@ -457,7 +458,9 @@ class HomeserverTestCase(TestCase): # Type ignore: mypy doesn't like us assigning to methods. self.hs.get_auth().get_user_by_req = get_requester # type: ignore[method-assign] self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[method-assign] - self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[method-assign] + self.hs.get_auth().get_access_token_from_request = Mock( # type: ignore[method-assign] + return_value=token + ) if self.needs_threadpool: self.reactor.threadpool = ThreadPool() # type: ignore[assignment] @@ -779,7 +782,7 @@ class HomeserverTestCase(TestCase): self, username: str, appservice_token: str, - ) -> Tuple[str, str]: + ) -> Tuple[str, Optional[str]]: """Register an appservice user as an application service. Requires the client-facing registration API be registered. @@ -803,7 +806,7 @@ class HomeserverTestCase(TestCase): access_token=appservice_token, ) self.assertEqual(channel.code, 200, channel.json_body) - return channel.json_body["user_id"], channel.json_body["device_id"] + return channel.json_body["user_id"], channel.json_body.get("device_id") def login( self, diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py
index d82822d00d..cfd2882410 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py
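The new `GatherCoroutineTests` below pair each coroutine with the Deferred it is blocked on, so a test can resolve them in any order and observe the gathered result. A minimal asyncio analogue of that helper pattern (plain futures here; the real tests use Twisted Deferreds and Synapse's logging contexts):

```py
import asyncio
from typing import Any, Coroutine, Tuple


def make_coroutine() -> Tuple[Coroutine[Any, Any, Any], "asyncio.Future[Any]"]:
    # Return a coroutine plus the future it awaits, so the caller controls
    # exactly when (and in what order) each coroutine completes.
    fut: "asyncio.Future[Any]" = asyncio.get_running_loop().create_future()

    async def inner() -> Any:
        return await fut

    return inner(), fut


async def main() -> None:
    coro1, fut1 = make_coroutine()
    coro2, fut2 = make_coroutine()
    gathered = asyncio.ensure_future(asyncio.gather(coro1, coro2))

    # Resolve out of order; the gather only completes once both are done.
    fut2.set_result("test")
    fut1.set_result(1)
    assert await gathered == [1, "test"]


asyncio.run(main())
```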
@@ -18,7 +18,7 @@
 #
 #
 import traceback
-from typing import Generator, List, NoReturn, Optional
+from typing import Any, Coroutine, Generator, List, NoReturn, Optional, Tuple, TypeVar
 
 from parameterized import parameterized_class
 
@@ -39,6 +39,7 @@ from synapse.util.async_helpers import (
     ObservableDeferred,
     concurrently_execute,
     delay_cancellation,
+    gather_optional_coroutines,
     stop_cancellation,
     timeout_deferred,
 )
@@ -46,6 +47,8 @@ from synapse.util.async_helpers import (
 from tests.server import get_clock
 from tests.unittest import TestCase
 
+T = TypeVar("T")
+
 
 class ObservableDeferredTest(TestCase):
     def test_succeed(self) -> None:
@@ -317,12 +320,19 @@ class ConcurrentlyExecuteTest(TestCase):
             await concurrently_execute(callback, [1], 2)
         except _TestException as e:
             tb = traceback.extract_tb(e.__traceback__)
-            # we expect to see "caller", "concurrently_execute", "callback",
-            # and some magic from inside ensureDeferred that happens when .fail
-            # is called.
+
+            # Remove twisted internals from the stack, as we don't care
+            # about the precise details.
+            tb = traceback.StackSummary(
+                t for t in tb if "/twisted/" not in t.filename
+            )
+
+            # we expect to see "caller", "concurrently_execute" at the top of the stack
            self.assertEqual(tb[0].name, "caller")
             self.assertEqual(tb[1].name, "concurrently_execute")
-            self.assertEqual(tb[-2].name, "callback")
+            # ... some stack frames from the implementation of `concurrently_execute` ...
+            # and at the bottom of the stack we expect to see "callback"
+            self.assertEqual(tb[-1].name, "callback")
         else:
             self.fail("No exception thrown")
 
@@ -588,3 +598,106 @@ class AwakenableSleeperTests(TestCase):
         sleeper.wake("name")
         self.assertTrue(d1.called)
         self.assertTrue(d2.called)
+
+
+class GatherCoroutineTests(TestCase):
+    """Tests for `gather_optional_coroutines`"""
+
+    def make_coroutine(self) -> Tuple[Coroutine[Any, Any, T], "defer.Deferred[T]"]:
+        """Returns a coroutine and a deferred that it is waiting on to resolve"""
+
+        d: "defer.Deferred[T]" = defer.Deferred()
+
+        async def inner() -> T:
+            with PreserveLoggingContext():
+                return await d
+
+        return inner(), d
+
+    def test_single(self) -> None:
+        "Test that passing in a single coroutine works"
+
+        with LoggingContext("test_ctx") as test_ctx:
+            deferred: "defer.Deferred[None]"
+            coroutine, deferred = self.make_coroutine()
+
+            gather_deferred = defer.ensureDeferred(
+                gather_optional_coroutines(coroutine)
+            )
+
+            # We shouldn't have a result yet, and should be in the sentinel
+            # context.
+            self.assertNoResult(gather_deferred)
+            self.assertEqual(current_context(), SENTINEL_CONTEXT)
+
+            # Resolving the deferred will resolve the coroutine
+            deferred.callback(None)
+
+            # All coroutines have resolved, and so we should have the results
+            result = self.successResultOf(gather_deferred)
+            self.assertEqual(result, (None,))
+
+            # We should be back in the normal context.
+            self.assertEqual(current_context(), test_ctx)
+
+    def test_multiple_resolve(self) -> None:
+        "Test that passing in multiple coroutines that all resolve works"
+
+        with LoggingContext("test_ctx") as test_ctx:
+            deferred1: "defer.Deferred[int]"
+            coroutine1, deferred1 = self.make_coroutine()
+            deferred2: "defer.Deferred[str]"
+            coroutine2, deferred2 = self.make_coroutine()
+
+            gather_deferred = defer.ensureDeferred(
+                gather_optional_coroutines(coroutine1, coroutine2)
+            )
+
+            # We shouldn't have a result yet, and should be in the sentinel
+            # context.
+            self.assertNoResult(gather_deferred)
+            self.assertEqual(current_context(), SENTINEL_CONTEXT)
+
+            # Even if we resolve one of the coroutines, we shouldn't have a result
+            # yet
+            deferred2.callback("test")
+            self.assertNoResult(gather_deferred)
+            self.assertEqual(current_context(), SENTINEL_CONTEXT)
+
+            deferred1.callback(1)
+
+            # All coroutines have resolved, and so we should have the results
+            result = self.successResultOf(gather_deferred)
+            self.assertEqual(result, (1, "test"))
+
+            # We should be back in the normal context.
+            self.assertEqual(current_context(), test_ctx)
+
+    def test_multiple_fail(self) -> None:
+        "Test that passing in multiple coroutines where one fails does the right thing"
+
+        with LoggingContext("test_ctx") as test_ctx:
+            deferred1: "defer.Deferred[int]"
+            coroutine1, deferred1 = self.make_coroutine()
+            deferred2: "defer.Deferred[str]"
+            coroutine2, deferred2 = self.make_coroutine()
+
+            gather_deferred = defer.ensureDeferred(
+                gather_optional_coroutines(coroutine1, coroutine2)
+            )
+
+            # We shouldn't have a result yet, and should be in the sentinel
+            # context.
+            self.assertNoResult(gather_deferred)
+            self.assertEqual(current_context(), SENTINEL_CONTEXT)
+
+            # Throw an exception in one of the coroutines
+            exc = Exception("test")
+            deferred2.errback(exc)
+
+            # Expect the gather deferred to immediately fail
+            result_exc = self.failureResultOf(gather_deferred)
+            self.assertEqual(result_exc.value, exc)
+
+            # We should be back in the normal context.
+            self.assertEqual(current_context(), test_ctx)
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index 13a4e6ddaa..c052ba2b75 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py
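The hunks below (like several earlier in this diff) convert comma-chained `with` statements into the parenthesized multi-context form, which the grammar officially supports from Python 3.10 (and which CPython 3.9's PEG parser already accepts). A runnable sketch:

```py
from contextlib import nullcontext

# Old style: one long chained statement, awkward to wrap.
with nullcontext(1) as a, nullcontext(2) as b:
    assert (a, b) == (1, 2)

# Parenthesized style (Python 3.10+): one manager per line, trailing comma OK.
with (
    nullcontext(1) as a,
    nullcontext(2) as b,
):
    assert (a, b) == (1, 2)
```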
@@ -109,10 +109,13 @@ class TestDependencyChecker(TestCase): def test_checks_ignore_dev_dependencies(self) -> None: """Both generic and per-extra checks should ignore dev dependencies.""" - with patch( - "synapse.util.check_dependencies.metadata.requires", - return_value=["dummypkg >= 1; extra == 'mypy'"], - ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}): + with ( + patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'mypy'"], + ), + patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}), + ): # We're testing that none of these calls raise. with self.mock_installed_package(None): check_requirements() @@ -141,10 +144,13 @@ class TestDependencyChecker(TestCase): def test_check_for_extra_dependencies(self) -> None: """Complain if a package required for an extra is missing or old.""" - with patch( - "synapse.util.check_dependencies.metadata.requires", - return_value=["dummypkg >= 1; extra == 'cool-extra'"], - ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}): + with ( + patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'cool-extra'"], + ), + patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}), + ): with self.mock_installed_package(None): self.assertRaises(DependencyException, check_requirements, "cool-extra") with self.mock_installed_package(old): diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py
index 7cbb1007da..7510657b85 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py
@@ -19,9 +19,7 @@ # # -from typing import Hashable, Tuple - -from typing_extensions import Protocol +from typing import Hashable, Protocol, Tuple from twisted.internet import defer, reactor from twisted.internet.base import ReactorBase diff --git a/tests/util/test_stream_change_cache.py b/tests/util/test_stream_change_cache.py
index af1199ef8a..9254bff79b 100644 --- a/tests/util/test_stream_change_cache.py +++ b/tests/util/test_stream_change_cache.py
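The adjusted assertions below pin down `StreamChangeCache`'s boundary semantics: a query at the cache's earliest known stream position now counts as a cache hit, and "has changed" means a change strictly *after* the queried position. A toy model of just those semantics (hypothetical, not the real implementation):

```py
from typing import Dict


class ToyChangeCache:
    """Toy model: remembers each entity's last-change stream position."""

    def __init__(self, earliest_known: int) -> None:
        self._earliest = earliest_known
        self._last_change: Dict[str, int] = {}

    def entity_has_changed(self, entity: str, pos: int) -> None:
        self._last_change[entity] = max(self._last_change.get(entity, 0), pos)

    def has_entity_changed(self, entity: str, pos: int) -> bool:
        if pos < self._earliest:
            # Before the cache's horizon: must conservatively say "changed".
            return True
        # "Changed" means a change strictly after the queried position.
        return self._last_change.get(entity, self._earliest) > pos


cache = ToyChangeCache(earliest_known=1)
cache.entity_has_changed("user@foo.com", 2)

assert cache.has_entity_changed("user@foo.com", 0)  # before the horizon
assert cache.has_entity_changed("user@foo.com", 1)  # changed at 2 > 1
assert not cache.has_entity_changed("user@foo.com", 2)  # nothing after 2
```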
@@ -53,8 +53,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         # return True, whether it's a known entity or not.
         self.assertTrue(cache.has_entity_changed("user@foo.com", 0))
         self.assertTrue(cache.has_entity_changed("not@here.website", 0))
-        self.assertTrue(cache.has_entity_changed("user@foo.com", 3))
-        self.assertTrue(cache.has_entity_changed("not@here.website", 3))
+        self.assertTrue(cache.has_entity_changed("user@foo.com", 2))
+        self.assertTrue(cache.has_entity_changed("not@here.website", 2))
 
     def test_entity_has_changed_pops_off_start(self) -> None:
         """
@@ -76,9 +76,11 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         self.assertTrue("user@foo.com" not in cache._entity_to_key)
 
         self.assertEqual(
-            cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"]
+            cache.get_all_entities_changed(2).entities,
+            ["bar@baz.net", "user@elsewhere.org"],
         )
-        self.assertFalse(cache.get_all_entities_changed(2).hit)
+        self.assertFalse(cache.get_all_entities_changed(1).hit)
+        self.assertTrue(cache.get_all_entities_changed(2).hit)
 
         # If we update an existing entity, it keeps the two existing entities
         cache.entity_has_changed("bar@baz.net", 5)
@@ -89,7 +91,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
             cache.get_all_entities_changed(3).entities,
             ["user@elsewhere.org", "bar@baz.net"],
         )
-        self.assertFalse(cache.get_all_entities_changed(2).hit)
+        self.assertFalse(cache.get_all_entities_changed(1).hit)
+        self.assertTrue(cache.get_all_entities_changed(2).hit)
 
     def test_get_all_entities_changed(self) -> None:
         """
@@ -114,7 +117,8 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         self.assertEqual(
             cache.get_all_entities_changed(3).entities, ["user@elsewhere.org"]
         )
-        self.assertFalse(cache.get_all_entities_changed(1).hit)
+        self.assertFalse(cache.get_all_entities_changed(0).hit)
+        self.assertTrue(cache.get_all_entities_changed(1).hit)
 
         # ... later, things gest more updates
         cache.entity_has_changed("user@foo.com", 5)
@@ -149,7 +153,7 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
         # With no entities, it returns True for the past, present, and False for
         # the future.
         self.assertTrue(cache.has_any_entity_changed(0))
-        self.assertTrue(cache.has_any_entity_changed(1))
+        self.assertFalse(cache.has_any_entity_changed(1))
         self.assertFalse(cache.has_any_entity_changed(2))
 
         # We add an entity
@@ -251,3 +255,28 @@ class StreamChangeCacheTests(unittest.HomeserverTestCase):
 
         # Unknown entities will return None
         self.assertEqual(cache.get_max_pos_of_last_change("not@here.website"), None)
+
+    def test_all_entities_changed(self) -> None:
+        """
+        `StreamChangeCache.all_entities_changed(...)` will mark all entities as changed.
+        """
+        cache = StreamChangeCache("#test", 1, max_size=10)
+
+        cache.entity_has_changed("user@foo.com", 2)
+        cache.entity_has_changed("bar@baz.net", 3)
+        cache.entity_has_changed("user@elsewhere.org", 4)
+
+        cache.all_entities_changed(5)
+
+        # Everything should be marked as changed before the stream position where the
+        # change occurred.
+        self.assertTrue(cache.has_entity_changed("user@foo.com", 4))
+        self.assertTrue(cache.has_entity_changed("bar@baz.net", 4))
+        self.assertTrue(cache.has_entity_changed("user@elsewhere.org", 4))
+
+        # Nothing should be marked as changed at/after the stream position where the
+        # change occurred. In other words, nothing has changed since the stream position
+        # 5.
+ self.assertFalse(cache.has_entity_changed("user@foo.com", 5)) + self.assertFalse(cache.has_entity_changed("bar@baz.net", 5)) + self.assertFalse(cache.has_entity_changed("user@elsewhere.org", 5)) diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py
index 646fd2163e..34c2395ecf 100644 --- a/tests/util/test_stringutils.py +++ b/tests/util/test_stringutils.py
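The new test below exercises `is_namedspaced_grammar` (the identifier really is spelled that way in `synapse.util.stringutils`). Judging only from the cases asserted, an equivalent check is an anchored ASCII regex plus a length cap; the sketch below is an illustration consistent with the test, not the actual implementation:

```py
import re

# Starts with a lowercase letter; then only a-z, 0-9, hyphen, underscore,
# period; length capped so that 256 characters fail, as the test expects.
_NAMESPACED_RE = re.compile(r"[a-z][a-z0-9._-]*")


def looks_namespaced(value: str) -> bool:
    return len(value) <= 255 and _NAMESPACED_RE.fullmatch(value) is not None


assert looks_namespaced("org.matrix.msc1234")
assert looks_namespaced("t-e_s.t")
assert not looks_namespaced("1test")
assert not looks_namespaced("testö")
assert not looks_namespaced("t" * 256)
```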
@@ -20,7 +20,11 @@
 #
 
 from synapse.api.errors import SynapseError
-from synapse.util.stringutils import assert_valid_client_secret, base62_encode
+from synapse.util.stringutils import (
+    assert_valid_client_secret,
+    base62_encode,
+    is_namedspaced_grammar,
+)
 
 from .. import unittest
 
@@ -58,3 +62,24 @@ class StringUtilsTestCase(unittest.TestCase):
         self.assertEqual("10", base62_encode(62))
         self.assertEqual("1c", base62_encode(100))
         self.assertEqual("001c", base62_encode(100, minwidth=4))
+
+    def test_namespaced_identifier(self) -> None:
+        self.assertTrue(is_namedspaced_grammar("test"))
+        self.assertTrue(is_namedspaced_grammar("m.test"))
+        self.assertTrue(is_namedspaced_grammar("org.matrix.test"))
+        self.assertTrue(is_namedspaced_grammar("org.matrix.msc1234"))
+        self.assertTrue(is_namedspaced_grammar("t-e_s.t"))
+
+        # Must start with a letter.
+        self.assertFalse(is_namedspaced_grammar("1test"))
+        self.assertFalse(is_namedspaced_grammar("-test"))
+        self.assertFalse(is_namedspaced_grammar("_test"))
+        self.assertFalse(is_namedspaced_grammar(".test"))
+
+        # Must contain only a-z, 0-9, hyphen, underscore and period.
+        self.assertFalse(is_namedspaced_grammar("test/"))
+        self.assertFalse(is_namedspaced_grammar('test"'))
+        self.assertFalse(is_namedspaced_grammar("testö"))
+
+        # Must not exceed 255 characters.
+        self.assertFalse(is_namedspaced_grammar("t" * 256))
diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py
index 30f0510c9f..7f6e63bd49 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py
@@ -18,8 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-
-from typing import Optional, Tuple
+from typing import List, Optional, Tuple
 
 from twisted.internet.task import deferLater
 from twisted.test.proto_helpers import MemoryReactor
@@ -104,38 +103,48 @@ class TestTaskScheduler(HomeserverTestCase):
             )
         )
 
-        # This is to give the time to the active tasks to finish
+        def get_tasks_of_status(status: TaskStatus) -> List[ScheduledTask]:
+            tasks = (
+                self.get_success(self.task_scheduler.get_task(task_id))
+                for task_id in task_ids
+            )
+            return [t for t in tasks if t is not None and t.status == status]
+
+        # At this point, there should be MAX_CONCURRENT_RUNNING_TASKS active tasks and
+        # one scheduled task.
+        self.assertEqual(
+            len(get_tasks_of_status(TaskStatus.ACTIVE)),
+            TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS,
+        )
+        self.assertEqual(
+            len(get_tasks_of_status(TaskStatus.SCHEDULED)),
+            1,
+        )
+
+        # Give the active tasks time to finish
         self.reactor.advance(1)
 
-        # Check that only MAX_CONCURRENT_RUNNING_TASKS tasks has run and that one
+        # Check that MAX_CONCURRENT_RUNNING_TASKS tasks have run and that one
         # is still scheduled.
-        tasks = [
-            self.get_success(self.task_scheduler.get_task(task_id))
-            for task_id in task_ids
-        ]
-
-        self.assertEquals(
-            len(
-                [t for t in tasks if t is not None and t.status == TaskStatus.COMPLETE]
-            ),
+        self.assertEqual(
+            len(get_tasks_of_status(TaskStatus.COMPLETE)),
             TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS,
         )
+        scheduled_tasks = get_tasks_of_status(TaskStatus.SCHEDULED)
+        self.assertEqual(len(scheduled_tasks), 1)
 
-        scheduled_tasks = [
-            t for t in tasks if t is not None and t.status == TaskStatus.ACTIVE
-        ]
-        self.assertEquals(len(scheduled_tasks), 1)
+        # The scheduled task should start 0.1s after the first of the active tasks
+        # finishes
+        self.reactor.advance(0.1)
+        self.assertEqual(len(get_tasks_of_status(TaskStatus.ACTIVE)), 1)
 
-        # We need to wait for the next run of the scheduler loop
-        self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000))
+        # ... and should finally complete after another second
         self.reactor.advance(1)
-
-        # Check that the last task has been properly executed after the next scheduler loop run
         prev_scheduled_task = self.get_success(
            self.task_scheduler.get_task(scheduled_tasks[0].id)
         )
         assert prev_scheduled_task is not None
-        self.assertEquals(
+        self.assertEqual(
             prev_scheduled_task.status,
             TaskStatus.COMPLETE,
         )
diff --git a/tests/util/test_threepids.py b/tests/util/test_threepids.py
deleted file mode 100644
index 15575cc572..0000000000 --- a/tests/util/test_threepids.py +++ /dev/null
@@ -1,55 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright 2020 Dirk Klimpel -# Copyright (C) 2023 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# <https://www.gnu.org/licenses/agpl-3.0.html>. -# -# Originally licensed under the Apache License, Version 2.0: -# <http://www.apache.org/licenses/LICENSE-2.0>. -# -# [This file includes modifications made by New Vector Limited] -# -# - -from synapse.util.threepids import canonicalise_email - -from tests.unittest import HomeserverTestCase - - -class CanonicaliseEmailTests(HomeserverTestCase): - def test_no_at(self) -> None: - with self.assertRaises(ValueError): - canonicalise_email("address-without-at.bar") - - def test_two_at(self) -> None: - with self.assertRaises(ValueError): - canonicalise_email("foo@foo@test.bar") - - def test_bad_format(self) -> None: - with self.assertRaises(ValueError): - canonicalise_email("user@bad.example.net@good.example.com") - - def test_valid_format(self) -> None: - self.assertEqual(canonicalise_email("foo@test.bar"), "foo@test.bar") - - def test_domain_to_lower(self) -> None: - self.assertEqual(canonicalise_email("foo@TEST.BAR"), "foo@test.bar") - - def test_domain_with_umlaut(self) -> None: - self.assertEqual(canonicalise_email("foo@Öumlaut.com"), "foo@öumlaut.com") - - def test_address_casefold(self) -> None: - self.assertEqual( - canonicalise_email("Strauß@Example.com"), "strauss@example.com" - ) - - def test_address_trim(self) -> None: - self.assertEqual(canonicalise_email(" foo@test.bar "), "foo@test.bar") diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py
index 173a7cfaec..6fa575a18e 100644 --- a/tests/util/test_wheel_timer.py +++ b/tests/util/test_wheel_timer.py
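The rewrite below swaps opaque `object()` sentinels for short strings, which makes assertion failures readable. The behaviour under test: `insert(now, obj, then)` files `obj` into a coarse bucket (`bucket_size=5`), and `fetch(now)` drains every bucket whose window has fully elapsed, so an object comes back slightly after its nominal deadline but never before it. A toy model of those semantics (hypothetical, not the real `WheelTimer`):

```py
from typing import Dict, List


class ToyWheelTimer:
    """Toy model: coarse time buckets, drained on fetch."""

    def __init__(self, bucket_size: int) -> None:
        self.bucket_size = bucket_size
        self._buckets: Dict[int, List[str]] = {}

    def insert(self, now: int, obj: str, then: int) -> None:
        # Deadlines in the past are clamped to "now" so they fire promptly.
        key = max(then, now) // self.bucket_size
        self._buckets.setdefault(key, []).append(obj)

    def fetch(self, now: int) -> List[str]:
        # Drain every bucket whose whole window [k*size, (k+1)*size) is over.
        ready = sorted(k for k in self._buckets if (k + 1) * self.bucket_size <= now)
        out: List[str] = []
        for k in ready:
            out.extend(self._buckets.pop(k))
        return out


wheel = ToyWheelTimer(bucket_size=5)
wheel.insert(100, "1", 150)
assert wheel.fetch(149) == []  # bucket [150, 155) has not fully elapsed
assert wheel.fetch(156) == ["1"]
assert wheel.fetch(170) == []  # already drained
```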
@@ -28,53 +28,55 @@ class WheelTimerTestCase(unittest.TestCase): def test_single_insert_fetch(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj = object() - wheel.insert(100, obj, 150) + wheel.insert(100, "1", 150) self.assertListEqual(wheel.fetch(101), []) self.assertListEqual(wheel.fetch(110), []) self.assertListEqual(wheel.fetch(120), []) self.assertListEqual(wheel.fetch(130), []) self.assertListEqual(wheel.fetch(149), []) - self.assertListEqual(wheel.fetch(156), [obj]) + self.assertListEqual(wheel.fetch(156), ["1"]) self.assertListEqual(wheel.fetch(170), []) def test_multi_insert(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj1 = object() - obj2 = object() - obj3 = object() - wheel.insert(100, obj1, 150) - wheel.insert(105, obj2, 130) - wheel.insert(106, obj3, 160) + wheel.insert(100, "1", 150) + wheel.insert(105, "2", 130) + wheel.insert(106, "3", 160) self.assertListEqual(wheel.fetch(110), []) - self.assertListEqual(wheel.fetch(135), [obj2]) + self.assertListEqual(wheel.fetch(135), ["2"]) self.assertListEqual(wheel.fetch(149), []) - self.assertListEqual(wheel.fetch(158), [obj1]) + self.assertListEqual(wheel.fetch(158), ["1"]) self.assertListEqual(wheel.fetch(160), []) - self.assertListEqual(wheel.fetch(200), [obj3]) + self.assertListEqual(wheel.fetch(200), ["3"]) self.assertListEqual(wheel.fetch(210), []) def test_insert_past(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj = object() - wheel.insert(100, obj, 50) - self.assertListEqual(wheel.fetch(120), [obj]) + wheel.insert(100, "1", 50) + self.assertListEqual(wheel.fetch(120), ["1"]) def test_insert_past_multi(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj1 = object() - obj2 = object() - obj3 = object() - wheel.insert(100, obj1, 150) - wheel.insert(100, obj2, 140) - wheel.insert(100, obj3, 50) - self.assertListEqual(wheel.fetch(110), [obj3]) + wheel.insert(100, "1", 150) + wheel.insert(100, "2", 140) + wheel.insert(100, "3", 50) + self.assertListEqual(wheel.fetch(110), ["3"]) self.assertListEqual(wheel.fetch(120), []) - self.assertListEqual(wheel.fetch(147), [obj2]) - self.assertListEqual(wheel.fetch(200), [obj1]) + self.assertListEqual(wheel.fetch(147), ["2"]) + self.assertListEqual(wheel.fetch(200), ["1"]) self.assertListEqual(wheel.fetch(240), []) + + def test_multi_insert_then_past(self) -> None: + wheel: WheelTimer[object] = WheelTimer(bucket_size=5) + + wheel.insert(100, "1", 150) + wheel.insert(100, "2", 160) + wheel.insert(100, "3", 155) + + self.assertListEqual(wheel.fetch(110), []) + self.assertListEqual(wheel.fetch(158), ["1"]) diff --git a/tests/utils.py b/tests/utils.py
index 9fd26ef348..57986c18bc 100644 --- a/tests/utils.py +++ b/tests/utils.py
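The reworked `test_timeout` docstring below warns that the helper relies on signal delivery, which a tight loop can starve. For reference, a self-contained sketch of a SIGALRM-based timeout context manager of that general shape (Unix-only; illustrative, not the exact class from `tests/utils.py`):

```py
import signal
import time
from types import FrameType
from typing import Optional


class SketchTimeout:
    """SIGALRM-based timeout: Unix-only, and the handler only runs between
    bytecode instructions, so code must periodically yield (e.g. sleep)."""

    def __init__(self, seconds: int, message: str = "timed out") -> None:
        self.seconds = seconds
        self.message = message

    def _handle(self, signum: int, frame: Optional[FrameType]) -> None:
        raise TimeoutError(self.message)

    def __enter__(self) -> None:
        signal.signal(signal.SIGALRM, self._handle)
        signal.alarm(self.seconds)

    def __exit__(self, *exc: object) -> None:
        signal.alarm(0)  # always cancel any pending alarm


try:
    with SketchTimeout(1):
        time.sleep(2)  # sleeping yields control, so the alarm is delivered
except TimeoutError as e:
    print("caught:", e)
```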
@@ -28,6 +28,7 @@ from typing import ( Callable, Dict, List, + Literal, Optional, Tuple, Type, @@ -37,7 +38,7 @@ from typing import ( ) import attr -from typing_extensions import Literal, ParamSpec +from typing_extensions import ParamSpec from synapse.api.constants import EventTypes from synapse.api.room_versions import RoomVersions @@ -181,7 +182,6 @@ def default_config( "max_mau_value": 50, "mau_trial_days": 0, "mau_stats_only": False, - "mau_limits_reserved_threepids": [], "admin_contact": None, "rc_message": {"per_second": 10000, "burst_count": 10000}, "rc_registration": {"per_second": 10000, "burst_count": 10000}, @@ -200,9 +200,8 @@ def default_config( "per_user": {"per_second": 10000, "burst_count": 10000}, }, "rc_3pid_validation": {"per_second": 10000, "burst_count": 10000}, - "saml2_enabled": False, + "rc_presence": {"per_user": {"per_second": 10000, "burst_count": 10000}}, "public_baseurl": None, - "default_identity_server": None, "key_refresh_interval": 24 * 60 * 60 * 1000, "old_signing_keys": {}, "tls_fingerprints": [], @@ -399,11 +398,24 @@ class TestTimeout(Exception): class test_timeout: + """ + FIXME: This implementation is not robust against other code tight-looping and + preventing the signals propagating and timing out the test. You may need to add + `time.sleep(0.1)` to your code in order to allow this timeout to work correctly. + + ```py + with test_timeout(3): + while True: + my_checking_func() + time.sleep(0.1) + ``` + """ + def __init__(self, seconds: int, error_message: Optional[str] = None) -> None: - if error_message is None: - error_message = "test timed out after {}s.".format(seconds) + self.error_message = f"Test timed out after {seconds}s" + if error_message is not None: + self.error_message += f": {error_message}" self.seconds = seconds - self.error_message = error_message def handle_timeout(self, signum: int, frame: Optional[FrameType]) -> None: raise TestTimeout(self.error_message) diff --git a/tox.ini b/tox.ini
index 4cd9dfb966..a506b5034d 100644 --- a/tox.ini +++ b/tox.ini
@@ -1,5 +1,5 @@ [tox] -envlist = py37, py38, py39, py310 +envlist = py39, py310, py311, py312, py313 # we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 minversion = 2.3.2