-rw-r--r--  .buildkite/docker-compose-env  13
-rw-r--r--  .buildkite/docker-compose.py35.pg95.yaml  23
-rw-r--r--  .buildkite/docker-compose.py37.pg11.yaml  23
-rw-r--r--  .buildkite/docker-compose.py37.pg95.yaml  23
-rw-r--r--  .buildkite/docker-compose.py38.pg12.yaml  23
-rw-r--r--  .buildkite/docker-compose.sytest.py37.redis.yaml  22
-rwxr-xr-x  .buildkite/merge_base_branch.sh  2
-rw-r--r--  .buildkite/pipeline.yml  496
-rw-r--r--  MANIFEST.in  9
-rw-r--r--  UPGRADE.rst  85
-rw-r--r--  changelog.d/1.feature  1
-rw-r--r--  changelog.d/10.bugfix  1
-rw-r--r--  changelog.d/11.feature  1
-rw-r--r--  changelog.d/12.feature  1
-rw-r--r--  changelog.d/13.feature  1
-rw-r--r--  changelog.d/14.feature  1
-rw-r--r--  changelog.d/15.misc  1
-rw-r--r--  changelog.d/17.misc  1
-rw-r--r--  changelog.d/18.feature  1
-rw-r--r--  changelog.d/19.feature  1
-rw-r--r--  changelog.d/2.bugfix  1
-rw-r--r--  changelog.d/20.bugfix  1
-rw-r--r--  changelog.d/21.bugfix  1
-rw-r--r--  changelog.d/28.bugfix  1
-rw-r--r--  changelog.d/29.misc  1
-rw-r--r--  changelog.d/3.bugfix  1
-rw-r--r--  changelog.d/30.misc  1
-rw-r--r--  changelog.d/32.bugfix  1
-rw-r--r--  changelog.d/39.feature  1
-rw-r--r--  changelog.d/4.bugfix  1
-rw-r--r--  changelog.d/45.feature  1
-rw-r--r--  changelog.d/46.feature  1
-rw-r--r--  changelog.d/47.misc  1
-rw-r--r--  changelog.d/48.feature  1
-rw-r--r--  changelog.d/5.bugfix  1
-rw-r--r--  changelog.d/50.feature  1
-rw-r--r--  changelog.d/5083.feature  1
-rw-r--r--  changelog.d/5098.misc  1
-rw-r--r--  changelog.d/51.feature  1
-rw-r--r--  changelog.d/5214.feature  1
-rw-r--r--  changelog.d/53.feature  1
-rw-r--r--  changelog.d/5416.misc  1
-rw-r--r--  changelog.d/5420.feature  1
-rw-r--r--  changelog.d/56.misc  1
-rw-r--r--  changelog.d/5610.feature  1
-rw-r--r--  changelog.d/57.misc  1
-rw-r--r--  changelog.d/5702.bugfix  1
-rw-r--r--  changelog.d/5760.feature  1
-rw-r--r--  changelog.d/58.misc  1
-rw-r--r--  changelog.d/59.feature  1
-rw-r--r--  changelog.d/6.bugfix  1
-rw-r--r--  changelog.d/60.misc  1
-rw-r--r--  changelog.d/61.misc  1
-rw-r--r--  changelog.d/62.misc  1
-rw-r--r--  changelog.d/63.feature  1
-rw-r--r--  changelog.d/64.bugfix  1
-rw-r--r--  changelog.d/65.bugfix  1
-rw-r--r--  changelog.d/66.bugfix  1
-rw-r--r--  changelog.d/7124.bugfix  1
-rw-r--r--  changelog.d/7796.bugfix  1
-rw-r--r--  changelog.d/8004.feature  1
-rw-r--r--  changelog.d/8216.misc  1
-rw-r--r--  changelog.d/8230.misc  1
-rw-r--r--  changelog.d/8236.bugfix  1
-rw-r--r--  changelog.d/8243.misc  1
-rw-r--r--  changelog.d/8247.misc  1
-rw-r--r--  changelog.d/8250.misc  1
-rw-r--r--  changelog.d/8256.misc  1
-rw-r--r--  changelog.d/8257.misc  1
-rw-r--r--  changelog.d/8258.misc  1
-rw-r--r--  changelog.d/8259.misc  1
-rw-r--r--  changelog.d/8260.misc  1
-rw-r--r--  changelog.d/8261.misc  1
-rw-r--r--  changelog.d/8262.bugfix  1
-rw-r--r--  changelog.d/8265.bugfix  1
-rw-r--r--  changelog.d/8268.bugfix  1
-rw-r--r--  changelog.d/8275.feature  1
-rw-r--r--  changelog.d/8278.bugfix  1
-rw-r--r--  changelog.d/8279.misc  1
-rw-r--r--  changelog.d/8282.misc  1
-rw-r--r--  changelog.d/8285.misc  1
-rw-r--r--  changelog.d/8287.bugfix  1
-rw-r--r--  changelog.d/8479.feature  1
-rw-r--r--  changelog.d/9.misc  1
-rw-r--r--  contrib/systemd/README.md  17
-rw-r--r--  docs/sample_config.yaml  208
-rw-r--r--  docs/sphinx/conf.py  8
-rw-r--r--  mypy.ini  3
-rw-r--r--  res/templates-dinsic/mail-Vector.css  7
-rw-r--r--  res/templates-dinsic/mail.css  156
-rw-r--r--  res/templates-dinsic/notif.html  45
-rw-r--r--  res/templates-dinsic/notif.txt  16
-rw-r--r--  res/templates-dinsic/notif_mail.html  55
-rw-r--r--  res/templates-dinsic/notif_mail.txt  10
-rw-r--r--  res/templates-dinsic/room.html  33
-rw-r--r--  res/templates-dinsic/room.txt  9
-rwxr-xr-x  scripts-dev/check-newsfragment  6
-rwxr-xr-x  scripts/synapse_port_db  2
-rw-r--r--  synapse/api/auth.py  13
-rw-r--r--  synapse/api/errors.py  3
-rw-r--r--  synapse/api/filtering.py  2
-rw-r--r--  synapse/api/urls.py  1
-rw-r--r--  synapse/app/admin_cmd.py  3
-rw-r--r--  synapse/app/generic_worker.py  2
-rw-r--r--  synapse/app/homeserver.py  10
-rw-r--r--  synapse/config/_base.py  3
-rw-r--r--  synapse/config/emailconfig.py  12
-rw-r--r--  synapse/config/logger.py  25
-rw-r--r--  synapse/config/password.py  4
-rw-r--r--  synapse/config/ratelimiting.py  9
-rw-r--r--  synapse/config/registration.py  128
-rw-r--r--  synapse/config/repository.py  30
-rw-r--r--  synapse/config/server.py  107
-rw-r--r--  synapse/config/user_directory.py  9
-rw-r--r--  synapse/events/spamcheck.py  73
-rw-r--r--  synapse/events/third_party_rules.py  53
-rw-r--r--  synapse/federation/sender/__init__.py  11
-rw-r--r--  synapse/federation/sender/per_destination_queue.py  11
-rw-r--r--  synapse/federation/transport/client.py  16
-rw-r--r--  synapse/federation/transport/server.py  53
-rw-r--r--  synapse/handlers/account_validity.py  34
-rw-r--r--  synapse/handlers/deactivate_account.py  4
-rw-r--r--  synapse/handlers/directory.py  10
-rw-r--r--  synapse/handlers/events.py  4
-rw-r--r--  synapse/handlers/federation.py  79
-rw-r--r--  synapse/handlers/identity.py  277
-rw-r--r--  synapse/handlers/initial_sync.py  11
-rw-r--r--  synapse/handlers/message.py  7
-rw-r--r--  synapse/handlers/pagination.py  49
-rw-r--r--  synapse/handlers/presence.py  3
-rw-r--r--  synapse/handlers/profile.py  214
-rw-r--r--  synapse/handlers/receipts.py  15
-rw-r--r--  synapse/handlers/register.py  131
-rw-r--r--  synapse/handlers/room.py  34
-rw-r--r--  synapse/handlers/room_member.py  98
-rw-r--r--  synapse/handlers/room_member_worker.py  9
-rw-r--r--  synapse/handlers/set_password.py  3
-rw-r--r--  synapse/handlers/sync.py  5
-rw-r--r--  synapse/logging/utils.py  6
-rw-r--r--  synapse/module_api/__init__.py  82
-rw-r--r--  synapse/notifier.py  5
-rw-r--r--  synapse/push/baserules.py  12
-rw-r--r--  synapse/push/emailpusher.py  2
-rw-r--r--  synapse/push/httppusher.py  2
-rw-r--r--  synapse/push/mailer.py  2
-rw-r--r--  synapse/push/pusherpool.py  19
-rw-r--r--  synapse/python_dependencies.py  2
-rw-r--r--  synapse/replication/http/membership.py  10
-rw-r--r--  synapse/replication/tcp/client.py  3
-rw-r--r--  synapse/res/templates/password_reset_confirmation.html  16
-rw-r--r--  synapse/rest/__init__.py  7
-rw-r--r--  synapse/rest/client/v1/presence.py  4
-rw-r--r--  synapse/rest/client/v1/profile.py  35
-rw-r--r--  synapse/rest/client/v1/push_rule.py  15
-rw-r--r--  synapse/rest/client/v1/room.py  3
-rw-r--r--  synapse/rest/client/v2_alpha/account.py  368
-rw-r--r--  synapse/rest/client/v2_alpha/account_data.py  7
-rw-r--r--  synapse/rest/client/v2_alpha/register.py  249
-rw-r--r--  synapse/rest/client/v2_alpha/user_directory.py  143
-rw-r--r--  synapse/rest/client/versions.py  5
-rw-r--r--  synapse/rest/media/v1/filepath.py  19
-rw-r--r--  synapse/rest/media/v1/media_repository.py  69
-rw-r--r--  synapse/rest/media/v1/media_storage.py  28
-rw-r--r--  synapse/rest/media/v1/thumbnail_resource.py  5
-rw-r--r--  synapse/rest/media/v1/thumbnailer.py  14
-rw-r--r--  synapse/rest/synapse/__init__.py  14
-rw-r--r--  synapse/rest/synapse/client/__init__.py  14
-rw-r--r--  synapse/rest/synapse/client/password_reset.py  127
-rw-r--r--  synapse/rulecheck/__init__.py  0
-rw-r--r--  synapse/rulecheck/domain_rule_checker.py  181
-rw-r--r--  synapse/storage/database.py  4
-rw-r--r--  synapse/storage/databases/main/devices.py  7
-rw-r--r--  synapse/storage/databases/main/media_repository.py  57
-rw-r--r--  synapse/storage/databases/main/profile.py  132
-rw-r--r--  synapse/storage/databases/main/purge_events.py  2
-rw-r--r--  synapse/storage/databases/main/push_rule.py  131
-rw-r--r--  synapse/storage/databases/main/registration.py  79
-rw-r--r--  synapse/storage/databases/main/room.py  22
-rw-r--r--  synapse/storage/databases/main/schema/delta/48/profiles_batch.sql  36
-rw-r--r--  synapse/storage/databases/main/schema/delta/50/profiles_deactivated_users.sql  23
-rw-r--r--  synapse/storage/databases/main/schema/delta/55/profile_replication_status_index.sql  16
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres  33
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite  44
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql  28
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql  42
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql  22
-rw-r--r--  synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql  21
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres  15
-rw-r--r--  synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite  4
-rw-r--r--  synapse/storage/databases/main/stats.py  36
-rw-r--r--  synapse/storage/databases/main/stream.py  21
-rw-r--r--  synapse/storage/databases/main/transactions.py  104
-rw-r--r--  synapse/storage/prepare_database.py  26
-rw-r--r--  synapse/storage/util/id_generators.py  43
-rw-r--r--  synapse/streams/config.py  61
-rw-r--r--  synapse/third_party_rules/__init__.py  14
-rw-r--r--  synapse/third_party_rules/access_rules.py  947
-rw-r--r--  synapse/types.py  166
-rw-r--r--  synapse/util/__init__.py  2
-rw-r--r--  synapse/util/async_helpers.py  135
-rw-r--r--  synapse/util/distributor.py  50
-rw-r--r--  synapse/util/frozenutils.py  5
-rw-r--r--  synapse/util/threepids.py  40
-rw-r--r--  sytest-blacklist  34
-rw-r--r--  tests/federation/test_complexity.py  30
-rw-r--r--  tests/federation/test_federation_catch_up.py  158
-rw-r--r--  tests/federation/test_federation_sender.py  2
-rw-r--r--  tests/handlers/test_auth.py  20
-rw-r--r--  tests/handlers/test_identity.py  116
-rw-r--r--  tests/handlers/test_profile.py  10
-rw-r--r--  tests/handlers/test_register.py  118
-rw-r--r--  tests/handlers/test_stats.py  23
-rw-r--r--  tests/handlers/test_typing.py  3
-rw-r--r--  tests/handlers/test_user_directory.py  135
-rw-r--r--  tests/http/federation/test_matrix_federation_agent.py  2
-rw-r--r--  tests/module_api/test_api.py  146
-rw-r--r--  tests/push/test_http.py  12
-rw-r--r--  tests/replication/test_federation_sender_shard.py  10
-rw-r--r--  tests/rest/admin/test_user.py  6
-rw-r--r--  tests/rest/client/test_identity.py  145
-rw-r--r--  tests/rest/client/test_retention.py  2
-rw-r--r--  tests/rest/client/test_room_access_rules.py  1066
-rw-r--r--  tests/rest/client/third_party_rules.py  31
-rw-r--r--  tests/rest/client/v1/test_push_rule_attrs.py  448
-rw-r--r--  tests/rest/client/v2_alpha/test_account.py  132
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py  205
-rw-r--r--  tests/rest/media/v1/test_media_storage.py  39
-rw-r--r--  tests/rulecheck/__init__.py  14
-rw-r--r--  tests/rulecheck/test_domainrulecheck.py  334
-rw-r--r--  tests/server.py  15
-rw-r--r--  tests/server_notices/test_resource_limits_server_notices.py  14
-rw-r--r--  tests/storage/test_client_ips.py  2
-rw-r--r--  tests/storage/test_id_generators.py  50
-rw-r--r--  tests/storage/test_main.py  2
-rw-r--r--  tests/storage/test_monthly_active_users.py  16
-rw-r--r--  tests/storage/test_profile.py  4
-rw-r--r--  tests/test_types.py  22
-rw-r--r--  tests/test_utils/__init__.py  13
-rw-r--r--  tests/unittest.py  4
-rw-r--r--  tests/utils.py  2
-rw-r--r--  tox.ini  7
241 files changed, 9225 insertions, 936 deletions
diff --git a/.buildkite/docker-compose-env b/.buildkite/docker-compose-env
new file mode 100644

index 0000000000..85b102d07f --- /dev/null +++ b/.buildkite/docker-compose-env
@@ -0,0 +1,13 @@ +CI +BUILDKITE +BUILDKITE_BUILD_NUMBER +BUILDKITE_BRANCH +BUILDKITE_BUILD_NUMBER +BUILDKITE_JOB_ID +BUILDKITE_BUILD_URL +BUILDKITE_PROJECT_SLUG +BUILDKITE_COMMIT +BUILDKITE_PULL_REQUEST +BUILDKITE_TAG +CODECOV_TOKEN +TRIAL_FLAGS diff --git a/.buildkite/docker-compose.py35.pg95.yaml b/.buildkite/docker-compose.py35.pg95.yaml new file mode 100644
index 0000000000..c6e8280e65 --- /dev/null +++ b/.buildkite/docker-compose.py35.pg95.yaml
@@ -0,0 +1,23 @@ +version: '3.1' + +services: + + postgres: + image: postgres:9.5 + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8" + command: -c fsync=off + + testenv: + image: python:3.5 + depends_on: + - postgres + env_file: docker-compose-env + environment: + SYNAPSE_POSTGRES_HOST: postgres + SYNAPSE_POSTGRES_USER: postgres + SYNAPSE_POSTGRES_PASSWORD: postgres + working_dir: /src + volumes: + - ${BUILDKITE_BUILD_CHECKOUT_PATH}:/src diff --git a/.buildkite/docker-compose.py37.pg11.yaml b/.buildkite/docker-compose.py37.pg11.yaml new file mode 100644
index 0000000000..411c37f213 --- /dev/null +++ b/.buildkite/docker-compose.py37.pg11.yaml
@@ -0,0 +1,23 @@ +version: '3.1' + +services: + + postgres: + image: postgres:11 + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8" + command: -c fsync=off + + testenv: + image: python:3.7 + depends_on: + - postgres + env_file: docker-compose-env + environment: + SYNAPSE_POSTGRES_HOST: postgres + SYNAPSE_POSTGRES_USER: postgres + SYNAPSE_POSTGRES_PASSWORD: postgres + working_dir: /src + volumes: + - ${BUILDKITE_BUILD_CHECKOUT_PATH}:/src diff --git a/.buildkite/docker-compose.py37.pg95.yaml b/.buildkite/docker-compose.py37.pg95.yaml new file mode 100644
index 0000000000..54ca794072 --- /dev/null +++ b/.buildkite/docker-compose.py37.pg95.yaml
@@ -0,0 +1,23 @@ +version: '3.1' + +services: + + postgres: + image: postgres:9.5 + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8" + command: -c fsync=off + + testenv: + image: python:3.7 + depends_on: + - postgres + env_file: docker-compose-env + environment: + SYNAPSE_POSTGRES_HOST: postgres + SYNAPSE_POSTGRES_USER: postgres + SYNAPSE_POSTGRES_PASSWORD: postgres + working_dir: /src + volumes: + - ${BUILDKITE_BUILD_CHECKOUT_PATH}:/src diff --git a/.buildkite/docker-compose.py38.pg12.yaml b/.buildkite/docker-compose.py38.pg12.yaml new file mode 100644
index 0000000000..934a34cf02 --- /dev/null +++ b/.buildkite/docker-compose.py38.pg12.yaml
@@ -0,0 +1,23 @@ +version: '3.1' + +services: + + postgres: + image: postgres:12 + environment: + POSTGRES_PASSWORD: postgres + POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8" + command: -c fsync=off + + testenv: + image: python:3.8 + depends_on: + - postgres + env_file: docker-compose-env + environment: + SYNAPSE_POSTGRES_HOST: postgres + SYNAPSE_POSTGRES_USER: postgres + SYNAPSE_POSTGRES_PASSWORD: postgres + working_dir: /src + volumes: + - ${BUILDKITE_BUILD_CHECKOUT_PATH}:/src diff --git a/.buildkite/docker-compose.sytest.py37.redis.yaml b/.buildkite/docker-compose.sytest.py37.redis.yaml new file mode 100644
index 0000000000..b9e80cc557 --- /dev/null +++ b/.buildkite/docker-compose.sytest.py37.redis.yaml
@@ -0,0 +1,22 @@ +version: '3.1' + +services: + + redis: + image: redis:5.0 + + sytest: + image: matrixdotorg/sytest-synapse:py37 + depends_on: + - redis + env_file: docker-compose-env + environment: + POSTGRES: "1" + WORKERS: "1" + BLACKLIST: "synapse-blacklist-with-workers" + REDIS: "redis" + working_dir: "/src" + entrypoint: "" + volumes: + - ${BUILDKITE_BUILD_CHECKOUT_PATH}:/src + - ${BUILDKITE_BUILD_CHECKOUT_PATH}/logs:/logs diff --git a/.buildkite/merge_base_branch.sh b/.buildkite/merge_base_branch.sh
index 361440fd1a..d0a7aef8cb 100755 --- a/.buildkite/merge_base_branch.sh +++ b/.buildkite/merge_base_branch.sh
@@ -12,7 +12,7 @@ if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then # It probably hasn't had a PR opened yet. Since all PRs land on develop, we # can probably assume it's based on it and will be merged into it. - GITBASE="develop" + GITBASE="dinsic" else # Get the reference, using the GitHub API GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml new file mode 100644
index 0000000000..5877ff0883 --- /dev/null +++ b/.buildkite/pipeline.yml
@@ -0,0 +1,496 @@ +env: + COVERALLS_REPO_TOKEN: wsJWOby6j0uCYFiCes3r0XauxO27mx8lD + +steps: + - label: "\U0001F9F9 Check Style" + command: + - "python -m pip install tox" + - "tox -e check_codestyle" + plugins: + - docker#v3.0.1: + image: "python:3.6" + mount-buildkite-agent: false + + - label: "\U0001F9F9 packaging" + command: + - "python -m pip install tox" + - "tox -e packaging" + plugins: + - docker#v3.0.1: + image: "python:3.6" + mount-buildkite-agent: false + + - label: "\U0001F9F9 isort" + command: + - "python -m pip install tox" + - "tox -e check_isort" + plugins: + - docker#v3.0.1: + image: "python:3.6" + mount-buildkite-agent: false + + - label: ":newspaper: Newsfile" + command: + - "python -m pip install tox" + - "scripts-dev/check-newsfragment" + branches: "!master !develop !release-*" + plugins: + - docker#v3.0.1: + image: "python:3.6" + propagate-environment: true + mount-buildkite-agent: false + + - label: "\U0001F9F9 check-sample-config" + command: + - "python -m pip install tox" + - "tox -e check-sampleconfig" + plugins: + - docker#v3.0.1: + image: "python:3.6" + mount-buildkite-agent: false + + - label: ":mypy: mypy" + command: + - "python -m pip install tox" + - "tox -e mypy" + plugins: + - docker#v3.0.1: + image: "python:3.7" + mount-buildkite-agent: false + + - wait + + ################################################################################ + # + # `trial` tests + # + ################################################################################ + + - label: ":python: 3.5 / SQLite / Old Deps" + command: + - ".buildkite/scripts/test_old_deps.sh" + env: + TRIAL_FLAGS: "-j 2" + plugins: + - docker#v3.0.1: + image: "ubuntu:xenial" # We use xenial to get an old sqlite and python + workdir: "/src" + mount-buildkite-agent: false + propagate-environment: true + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: ":python: 3.5 / SQLite" + command: + - "python -m pip install tox" + - "tox -e py35,combine" + env: + TRIAL_FLAGS: "-j 2" + plugins: + - docker#v3.0.1: + image: "python:3.5" + workdir: "/src" + mount-buildkite-agent: false + propagate-environment: true + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: ":python: 3.6 / SQLite" + command: + - "python -m pip install tox" + - "tox -e py36,combine" + env: + TRIAL_FLAGS: "-j 2" + plugins: + - docker#v3.0.1: + image: "python:3.6" + workdir: "/src" + mount-buildkite-agent: false + propagate-environment: true + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: ":python: 3.7 / SQLite" + command: + - "python -m pip install tox" + - "tox -e py37,combine" + env: + TRIAL_FLAGS: "-j 2" + plugins: + - docker#v3.0.1: + image: "python:3.7" + workdir: "/src" + mount-buildkite-agent: false + propagate-environment: true + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: ":python: 3.5 / :postgres: 9.5" + agents: + queue: "medium" + env: + TRIAL_FLAGS: "-j 8" + command: + - "bash -c 'python -m pip install tox && python -m tox 
-e py35-postgres,combine'" + plugins: + - matrix-org/download#v1.1.0: + urls: + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose.py35.pg95.yaml + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose-env + - docker-compose#v2.1.0: + run: testenv + config: + - /tmp/download-${BUILDKITE_BUILD_ID}/docker-compose.py35.pg95.yaml + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: ":python: 3.7 / :postgres: 11" + agents: + queue: "medium" + env: + TRIAL_FLAGS: "-j 8" + command: + - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'" + plugins: + - matrix-org/download#v1.1.0: + urls: + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose.py37.pg11.yaml + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose-env + - docker-compose#v2.1.0: + run: testenv + config: + - /tmp/download-${BUILDKITE_BUILD_ID}/docker-compose.py37.pg11.yaml + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: ":python: 3.8 / :postgres: 12" + agents: + queue: "medium" + env: + TRIAL_FLAGS: "-j 8" + command: + - "bash -c 'python -m pip install tox && python -m tox -e py38-postgres,combine'" + plugins: + - matrix-org/download#v1.1.0: + urls: + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose.py38.pg12.yaml + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose-env + - docker-compose#v2.1.0: + run: testenv + config: + - /tmp/download-${BUILDKITE_BUILD_ID}/docker-compose.py38.pg12.yaml + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + ################################################################################ + # + # Sytest + # + ################################################################################ + + - label: "SyTest - :python: 3.5 / SQLite / Monolith" + agents: + queue: "medium" + command: + - "bash .buildkite/merge_base_branch.sh" + - "bash /bootstrap.sh synapse" + plugins: + - docker#v3.0.1: + image: "matrixdotorg/sytest-synapse:dinsic" + propagate-environment: true + always-pull: true + workdir: "/src" + entrypoint: "/bin/sh" + init: false + shell: ["-x", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/results.tap" ] + - matrix-org/annotate: + path: "logs/annotate.md" + style: "error" +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith" + agents: + queue: "medium" + env: + POSTGRES: "1" + command: + - "bash .buildkite/merge_base_branch.sh" + - "bash /bootstrap.sh synapse" + plugins: + - docker#v3.0.1: + image: "matrixdotorg/sytest-synapse:dinsic" + propagate-environment: true + always-pull: true + workdir: "/src" + entrypoint: "/bin/sh" + init: false + shell: ["-x", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - 
artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/results.tap" ] + - matrix-org/annotate: + path: "logs/annotate.md" + style: "error" +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers" + agents: + queue: "medium" + env: + MULTI_POSTGRES: "1" # Test with split out databases + POSTGRES: "1" + WORKERS: "1" + BLACKLIST: "synapse-blacklist-with-workers" + command: + - "bash .buildkite/merge_base_branch.sh" + - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'" + - "bash /bootstrap.sh synapse" + plugins: + - docker#v3.0.1: + image: "matrixdotorg/sytest-synapse:dinsic" + propagate-environment: true + always-pull: true + workdir: "/src" + entrypoint: "/bin/sh" + init: false + shell: ["-x", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/results.tap" ] + - matrix-org/annotate: + path: "logs/annotate.md" + style: "error" + # - matrix-org/coveralls#v1.0: + # parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + + - label: "SyTest - :python: 3.8 / :postgres: 12 / Monolith" + agents: + queue: "medium" + env: + POSTGRES: "1" + command: + - "bash .buildkite/merge_base_branch.sh" + - "bash /bootstrap.sh synapse" + plugins: + - docker#v3.0.1: + image: "matrixdotorg/sytest-synapse:dinsic" + propagate-environment: true + always-pull: true + workdir: "/src" + entrypoint: "/bin/sh" + init: false + shell: ["-x", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/results.tap" ] + - matrix-org/annotate: + path: "logs/annotate.md" + style: "error" +# - matrix-org/coveralls#v1.0: +# parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + + - label: "SyTest - :python: 3.7 / :postgres: 11 / Workers" + agents: + queue: "medium" + env: + MULTI_POSTGRES: "1" # Test with split out databases + POSTGRES: "1" + WORKERS: "1" + BLACKLIST: "synapse-blacklist-with-workers" + command: + - "bash .buildkite/merge_base_branch.sh" + - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'" + - "bash /bootstrap.sh synapse" + plugins: + - docker#v3.0.1: + image: "matrixdotorg/sytest-synapse:dinsic" + propagate-environment: true + always-pull: true + workdir: "/src" + entrypoint: "/bin/sh" + init: false + shell: ["-x", "-c"] + mount-buildkite-agent: false + volumes: ["./logs:/logs"] + - artifacts#v1.2.0: + upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/results.tap" ] + - matrix-org/annotate: + path: "logs/annotate.md" + style: "error" + # - matrix-org/coveralls#v1.0: + # parallel: "true" + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 + +# TODO: Enable once Synapse v1.13.0 is merged in +# - label: "SyTest - :python: 3.7 / :postgres: 11 / Workers / :redis: Redis" +# agents: +# queue: "medium" +# command: +# - bash -c "cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers && ./.buildkite/merge_base_branch.sh && /bootstrap.sh synapse --redis-host redis" +# plugins: +# - matrix-org/download#v1.1.0: +# urls: +# - 
https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose.sytest.py37.redis.yaml +# - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose-env +# - docker-compose#v2.1.0: +# run: sytest +# config: +# - /tmp/download-${BUILDKITE_BUILD_ID}/docker-compose.sytest.py37.redis.yaml +# - artifacts#v1.2.0: +# upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/results.tap" ] +# - matrix-org/annotate: +# path: "logs/annotate.md" +# style: "error" +## - matrix-org/coveralls#v1.0: +## parallel: "true" +# retry: +# automatic: +# - exit_status: -1 +# limit: 2 +# - exit_status: 2 +# limit: 2 + + ################################################################################ + # + # synapse_port_db + # + ################################################################################ + + - label: "synapse_port_db / :python: 3.5 / :postgres: 9.5" + agents: + queue: "medium" + command: + - "bash .buildkite/scripts/test_synapse_port_db.sh" + plugins: + - matrix-org/download#v1.1.0: + urls: + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose.py35.pg95.yaml + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose-env + - docker-compose#v2.1.0: + run: testenv + config: + - /tmp/download-${BUILDKITE_BUILD_ID}/docker-compose.py35.pg95.yaml + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + + - label: "synapse_port_db / :python: 3.7 / :postgres: 11" + agents: + queue: "medium" + command: + - "bash .buildkite/scripts/test_synapse_port_db.sh" + plugins: + - matrix-org/download#v1.1.0: + urls: + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose.py37.pg11.yaml + - https://raw.githubusercontent.com/matrix-org/synapse-dinsic/dinsic/.buildkite/docker-compose-env + - docker-compose#v2.1.0: + run: testenv + config: + - /tmp/download-${BUILDKITE_BUILD_ID}/docker-compose.py37.pg11.yaml + - artifacts#v1.2.0: + upload: [ "_trial_temp/*/*.log" ] +# - matrix-org/coveralls#v1.0: +# parallel: "true" + +# - wait: ~ +# continue_on_failure: true +# +# - label: Trigger webhook +# command: "curl -k https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN -d \"payload[build_num]=$BUILDKITE_BUILD_NUMBER&payload[status]=done\"" diff --git a/MANIFEST.in b/MANIFEST.in
index 120ce5b776..0a9cf4f51c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in
@@ -1,4 +1,5 @@ include synctl +include sytest-blacklist include LICENSE include VERSION include *.rst @@ -51,3 +52,11 @@ prune demo/etc prune docker prune snap prune stubs + +exclude jenkins* +recursive-exclude jenkins *.sh + +# FIXME: we shouldn't have these templates here +recursive-include res/templates-dinsic *.css +recursive-include res/templates-dinsic *.html +recursive-include res/templates-dinsic *.txt diff --git a/UPGRADE.rst b/UPGRADE.rst
index 6492fa011f..63c3de1503 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst
@@ -75,6 +75,91 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.21.0 +==================== + +Forwarding ``/_synapse/client`` through your reverse proxy +---------------------------------------------------------- + +The `reverse proxy documentation +<https://github.com/matrix-org/synapse/blob/develop/docs/reverse_proxy.md>`_ has been updated +to include reverse proxy directives for ``/_synapse/client/*`` endpoints. As the user password +reset flow now uses endpoints under this prefix, **you must update your reverse proxy +configurations for user password reset to work**. + +Additionally, note that the `Synapse worker documentation +<https://github.com/matrix-org/synapse/blob/develop/docs/workers.md>`_ has been updated to + state that the ``/_synapse/client/password_reset/email/submit_token`` endpoint can be handled +by all workers. If you make use of Synapse's worker feature, please update your reverse proxy +configuration to reflect this change. + +New HTML templates +------------------ + +A new HTML template, +`password_reset_confirmation.html <https://github.com/matrix-org/synapse/blob/develop/synapse/res/templates/password_reset_confirmation.html>`_, +has been added to the ``synapse/res/templates`` directory. If you are using a +custom template directory, you may want to copy the template over and modify it. + +Note that as of v1.20.0, templates do not need to be included in custom template +directories for Synapse to start. The default templates will be used if a custom +template cannot be found. + +This page will appear to the user after clicking a password reset link that has +been emailed to them. + +To complete password reset, the page must include a way to make a `POST` +request to +``/_synapse/client/password_reset/{medium}/submit_token`` +with the query parameters from the original link, presented as a URL-encoded form. See the file +itself for more details. + +Updated Single Sign-on HTML Templates +------------------------------------- + +The ``saml_error.html`` template was removed from Synapse and replaced with the +``sso_error.html`` template. If your Synapse is configured to use SAML and a +custom ``sso_redirect_confirm_template_dir`` configuration then any customisations +of the ``saml_error.html`` template will need to be merged into the ``sso_error.html`` +template. These templates are similar, but the parameters are slightly different: + +* The ``msg`` parameter should be renamed to ``error_description``. +* There is no longer a ``code`` parameter for the response code. +* A string ``error`` parameter is available that includes a short hint of why a + user is seeing the error page. + +ThirdPartyEventRules breaking changes +------------------------------------- + +This release introduces a backwards-incompatible change to modules making use of +`ThirdPartyEventRules` in Synapse. + +The `http_client` argument is no longer passed to modules as they are initialised. Instead, +modules are expected to make use of the `http_client` property on the `ModuleApi` class. +Modules are now passed a `module_api` argument during initialisation, which is an instance of +`ModuleApi`. 
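[Editor's note] The two upgrade notes above lend themselves to short illustrations. First, a minimal sketch of submitting the password reset confirmation form using only Python's standard library. The /_synapse/client/password_reset/{medium}/submit_token path comes from the notes above; the token, client_secret and sid parameter names and the homeserver URL are assumptions for illustration and should be taken from the query string of the actual emailed link:

    import urllib.parse
    import urllib.request

    # Assumed values: in a real flow these are the query parameters of the
    # password reset link that was emailed to the user.
    params = {
        "token": "abcdef",
        "client_secret": "some_client_secret",
        "sid": "1",
    }

    # Endpoint named in the upgrade notes; "email" is the 3PID medium here.
    url = "https://synapse.example.com/_synapse/client/password_reset/email/submit_token"

    # Submit the parameters as a URL-encoded form, as the confirmation page does.
    data = urllib.parse.urlencode(params).encode("utf-8")
    request = urllib.request.Request(
        url,
        data=data,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )
    with urllib.request.urlopen(request) as response:
        print(response.status)

Second, a sketch of what the ThirdPartyEventRules change means for module authors. The class and the rule body below are hypothetical; the only points taken from the notes are that modules now receive a module_api argument (a ModuleApi instance) at initialisation and should use its http_client property instead of a separate http_client argument:

    class ExampleThirdPartyRules:
        """Hypothetical third party event rules module, for illustration only."""

        def __init__(self, config, module_api):
            # The http_client argument is no longer passed in; modules now get
            # a ModuleApi instance and reach the HTTP client through it.
            self.config = config
            self.module_api = module_api
            self.http_client = module_api.http_client

        async def check_event_allowed(self, event, state_events):
            # Placeholder rule: allow every event. A real module would inspect
            # the event and the room state passed in here.
            return True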
+ +New HTML templates +------------------ + +A new HTML template, +`password_reset_confirmation.html <https://github.com/matrix-org/synapse/blob/develop/synapse/res/templates/password_reset_confirmation.html>`_, +has been added to the ``synapse/res/templates`` directory. If you are using a +custom template directory, you may want to copy the template over and modify it. + +Note that as of v1.20.0, templates do not need to be included in custom template +directories for Synapse to start. The default templates will be used if a custom +template cannot be found. + +This page will appear to the user after clicking a password reset link that has +been emailed to them. + +To complete password reset, the page must include a way to make a `POST` +request to +``/_synapse/client/password_reset/{medium}/submit_token`` +with the query parameters from the original link, presented as a URL-encoded form. See the file +itself for more details. + Upgrading to v1.18.0 ==================== diff --git a/changelog.d/1.feature b/changelog.d/1.feature new file mode 100644
index 0000000000..845642e445 --- /dev/null +++ b/changelog.d/1.feature
@@ -0,0 +1 @@ +Forbid changing the name, avatar or topic of a direct room. diff --git a/changelog.d/10.bugfix b/changelog.d/10.bugfix new file mode 100644
index 0000000000..51f89f46dd --- /dev/null +++ b/changelog.d/10.bugfix
@@ -0,0 +1 @@ +Don't apply retention policy based filtering on state events. diff --git a/changelog.d/11.feature b/changelog.d/11.feature new file mode 100644
index 0000000000..362e4b1efd --- /dev/null +++ b/changelog.d/11.feature
@@ -0,0 +1 @@ +Allow server admins to configure a custom global rate-limiting for third party invites. \ No newline at end of file diff --git a/changelog.d/12.feature b/changelog.d/12.feature new file mode 100644
index 0000000000..8e6e7a28af --- /dev/null +++ b/changelog.d/12.feature
@@ -0,0 +1 @@ +Add `/user/:user_id/info` CS servlet and to give user deactivated/expired information. \ No newline at end of file diff --git a/changelog.d/13.feature b/changelog.d/13.feature new file mode 100644
index 0000000000..c2d2e93abf --- /dev/null +++ b/changelog.d/13.feature
@@ -0,0 +1 @@ +Hide expired users from the user directory, and optionally re-add them on renewal. \ No newline at end of file diff --git a/changelog.d/14.feature b/changelog.d/14.feature new file mode 100644
index 0000000000..020d0bac1e --- /dev/null +++ b/changelog.d/14.feature
@@ -0,0 +1 @@ +User displaynames now have capitalised letters after - symbols. \ No newline at end of file diff --git a/changelog.d/15.misc b/changelog.d/15.misc new file mode 100644
index 0000000000..4cc4a5175f --- /dev/null +++ b/changelog.d/15.misc
@@ -0,0 +1 @@ +Fix the ordering on `scripts/generate_signing_key.py`'s import statement. diff --git a/changelog.d/17.misc b/changelog.d/17.misc new file mode 100644
index 0000000000..58120ab5c7 --- /dev/null +++ b/changelog.d/17.misc
@@ -0,0 +1 @@ +Blacklist some flaky sytests until they're fixed. \ No newline at end of file diff --git a/changelog.d/18.feature b/changelog.d/18.feature new file mode 100644
index 0000000000..f5aa29a6e8 --- /dev/null +++ b/changelog.d/18.feature
@@ -0,0 +1 @@ +Add option `limit_profile_requests_to_known_users` to prevent requirement of a user sharing a room with another user to query their profile information. \ No newline at end of file diff --git a/changelog.d/19.feature b/changelog.d/19.feature new file mode 100644
index 0000000000..95a44a4a89 --- /dev/null +++ b/changelog.d/19.feature
@@ -0,0 +1 @@ +Add `max_avatar_size` and `allowed_avatar_mimetypes` to restrict the size of user avatars and their file type respectively. \ No newline at end of file diff --git a/changelog.d/2.bugfix b/changelog.d/2.bugfix new file mode 100644
index 0000000000..4fe5691468 --- /dev/null +++ b/changelog.d/2.bugfix
@@ -0,0 +1 @@ +Don't treat 3PID revocation as a new 3PID invite. diff --git a/changelog.d/20.bugfix b/changelog.d/20.bugfix new file mode 100644
index 0000000000..8ba53c28f9 --- /dev/null +++ b/changelog.d/20.bugfix
@@ -0,0 +1 @@ +Validate `client_secret` parameter against the regex provided by the C-S spec. \ No newline at end of file diff --git a/changelog.d/21.bugfix b/changelog.d/21.bugfix new file mode 100644
index 0000000000..630d7812f7 --- /dev/null +++ b/changelog.d/21.bugfix
@@ -0,0 +1 @@ +Fix resetting user passwords via a phone number. diff --git a/changelog.d/28.bugfix b/changelog.d/28.bugfix new file mode 100644
index 0000000000..38d7455971 --- /dev/null +++ b/changelog.d/28.bugfix
@@ -0,0 +1 @@ +Fix a bug causing account validity renewal emails to be sent even if the feature is turned off in some cases. diff --git a/changelog.d/29.misc b/changelog.d/29.misc new file mode 100644
index 0000000000..720e0ddcfb --- /dev/null +++ b/changelog.d/29.misc
@@ -0,0 +1 @@ +Improve performance when making `.well-known` requests by sharing the SSL options between requests. diff --git a/changelog.d/3.bugfix b/changelog.d/3.bugfix new file mode 100644
index 0000000000..cc4bcefa80 --- /dev/null +++ b/changelog.d/3.bugfix
@@ -0,0 +1 @@ +Fix encoding on password reset HTML responses in Python 2. diff --git a/changelog.d/30.misc b/changelog.d/30.misc new file mode 100644
index 0000000000..ae68554be3 --- /dev/null +++ b/changelog.d/30.misc
@@ -0,0 +1 @@ +Improve performance when making HTTP requests to sygnal, sydent, etc, by sharing the SSL context object between connections. diff --git a/changelog.d/32.bugfix b/changelog.d/32.bugfix new file mode 100644
index 0000000000..b6e7b90710 --- /dev/null +++ b/changelog.d/32.bugfix
@@ -0,0 +1 @@ +Fixes a bug when using the default display name during registration. diff --git a/changelog.d/39.feature b/changelog.d/39.feature new file mode 100644
index 0000000000..426b7ef27e --- /dev/null +++ b/changelog.d/39.feature
@@ -0,0 +1 @@ +Merge Synapse v1.12.4 `master` into the `dinsic` branch. \ No newline at end of file diff --git a/changelog.d/4.bugfix b/changelog.d/4.bugfix new file mode 100644
index 0000000000..fe717920a6 --- /dev/null +++ b/changelog.d/4.bugfix
@@ -0,0 +1 @@ +Fix handling of filtered strings in Python 3. diff --git a/changelog.d/45.feature b/changelog.d/45.feature new file mode 100644
index 0000000000..d45ac34ac1 --- /dev/null +++ b/changelog.d/45.feature
@@ -0,0 +1 @@ +Merge Synapse mainline releases v1.13.0 through v1.14.0 into the `dinsic` branch. \ No newline at end of file diff --git a/changelog.d/46.feature b/changelog.d/46.feature new file mode 100644
index 0000000000..7872d956e3 --- /dev/null +++ b/changelog.d/46.feature
@@ -0,0 +1 @@ +Add a bulk version of the User Info API. Deprecate the single-use version. \ No newline at end of file diff --git a/changelog.d/47.misc b/changelog.d/47.misc new file mode 100644
index 0000000000..1d6596d788 --- /dev/null +++ b/changelog.d/47.misc
@@ -0,0 +1 @@ +Improve performance of `mark_expired_users_as_inactive` background job. \ No newline at end of file diff --git a/changelog.d/48.feature b/changelog.d/48.feature new file mode 100644
index 0000000000..b7939f3f51 --- /dev/null +++ b/changelog.d/48.feature
@@ -0,0 +1 @@ +Prevent `/register` from raising `M_USER_IN_USE` until UI Auth has been completed. Have `/register/available` always return true. diff --git a/changelog.d/5.bugfix b/changelog.d/5.bugfix new file mode 100644
index 0000000000..53f57f46ca --- /dev/null +++ b/changelog.d/5.bugfix
@@ -0,0 +1 @@ +Fix room retention policy management in worker mode. diff --git a/changelog.d/50.feature b/changelog.d/50.feature new file mode 100644
index 0000000000..0801622c8a --- /dev/null +++ b/changelog.d/50.feature
@@ -0,0 +1 @@ +Merge Synapse mainline v1.15.1 into the `dinsic` branch. \ No newline at end of file diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature new file mode 100644
index 0000000000..2ffdd37eef --- /dev/null +++ b/changelog.d/5083.feature
@@ -0,0 +1 @@ +Adds auth_profile_reqs option to require access_token to GET /profile endpoints on CS API. diff --git a/changelog.d/5098.misc b/changelog.d/5098.misc new file mode 100644
index 0000000000..9cd83bf226 --- /dev/null +++ b/changelog.d/5098.misc
@@ -0,0 +1 @@ +Add workarounds for pep-517 install errors. diff --git a/changelog.d/51.feature b/changelog.d/51.feature new file mode 100644
index 0000000000..e5c9990ad6 --- /dev/null +++ b/changelog.d/51.feature
@@ -0,0 +1 @@ +Add `bind_new_user_emails_to_sydent` option for automatically binding user's emails after registration. diff --git a/changelog.d/5214.feature b/changelog.d/5214.feature new file mode 100644
index 0000000000..6c0f15c901 --- /dev/null +++ b/changelog.d/5214.feature
@@ -0,0 +1 @@ +Allow server admins to define and enforce a password policy (MSC2000). diff --git a/changelog.d/53.feature b/changelog.d/53.feature new file mode 100644
index 0000000000..96c628e824 --- /dev/null +++ b/changelog.d/53.feature
@@ -0,0 +1 @@ +Merge mainline Synapse v1.18.0 into the `dinsic` branch. \ No newline at end of file diff --git a/changelog.d/5416.misc b/changelog.d/5416.misc new file mode 100644
index 0000000000..155e8c7cd3 --- /dev/null +++ b/changelog.d/5416.misc
@@ -0,0 +1 @@ +Add unique index to the profile_replication_status table. diff --git a/changelog.d/5420.feature b/changelog.d/5420.feature new file mode 100644
index 0000000000..745864b903 --- /dev/null +++ b/changelog.d/5420.feature
@@ -0,0 +1 @@ +Add configuration option to hide new users from the user directory. diff --git a/changelog.d/56.misc b/changelog.d/56.misc new file mode 100644
index 0000000000..f66c55af21 --- /dev/null +++ b/changelog.d/56.misc
@@ -0,0 +1 @@ +Temporarily revert commit a3fbc23. diff --git a/changelog.d/5610.feature b/changelog.d/5610.feature new file mode 100644
index 0000000000..b99514f97e --- /dev/null +++ b/changelog.d/5610.feature
@@ -0,0 +1 @@ +Implement new custom event rules for power levels. diff --git a/changelog.d/57.misc b/changelog.d/57.misc new file mode 100644
index 0000000000..1bbe8611cd --- /dev/null +++ b/changelog.d/57.misc
@@ -0,0 +1 @@ +Add user_id back to presence in worker too https://github.com/matrix-org/synapse/commit/0bbbd10513008d30c17eb1d1e7ba1d091fb44ec7 . diff --git a/changelog.d/5702.bugfix b/changelog.d/5702.bugfix new file mode 100644
index 0000000000..43b6e39b13 --- /dev/null +++ b/changelog.d/5702.bugfix
@@ -0,0 +1 @@ +Fix 3PID invite to invite association detection in the Tchap room access rules. diff --git a/changelog.d/5760.feature b/changelog.d/5760.feature new file mode 100644
index 0000000000..90302d793e --- /dev/null +++ b/changelog.d/5760.feature
@@ -0,0 +1 @@ +Force the access rule to be "restricted" if the join rule is "public". diff --git a/changelog.d/58.misc b/changelog.d/58.misc new file mode 100644
index 0000000000..64098a68a4 --- /dev/null +++ b/changelog.d/58.misc
@@ -0,0 +1 @@ +Don't push if an user account has expired. diff --git a/changelog.d/59.feature b/changelog.d/59.feature new file mode 100644
index 0000000000..aa07f762d1 --- /dev/null +++ b/changelog.d/59.feature
@@ -0,0 +1 @@ +Freeze a room when the last administrator in the room leaves. \ No newline at end of file diff --git a/changelog.d/6.bugfix b/changelog.d/6.bugfix new file mode 100644
index 0000000000..43ab65cc95 --- /dev/null +++ b/changelog.d/6.bugfix
@@ -0,0 +1 @@ +Don't forbid membership events which membership isn't 'join' or 'invite' in restricted rooms, so that users who got into these rooms before the access rules started to be enforced can leave them. diff --git a/changelog.d/60.misc b/changelog.d/60.misc new file mode 100644
index 0000000000..d2625a4f65 --- /dev/null +++ b/changelog.d/60.misc
@@ -0,0 +1 @@ +Make all rooms noisy by default. diff --git a/changelog.d/61.misc b/changelog.d/61.misc new file mode 100644
index 0000000000..0c3ba98628 --- /dev/null +++ b/changelog.d/61.misc
@@ -0,0 +1 @@ +Change the minimum power levels for invites and other state events in new rooms. \ No newline at end of file diff --git a/changelog.d/62.misc b/changelog.d/62.misc new file mode 100644
index 0000000000..1e26456595 --- /dev/null +++ b/changelog.d/62.misc
@@ -0,0 +1 @@ +Type hinting and other cleanups for `synapse.third_party_rules.access_rules`. \ No newline at end of file diff --git a/changelog.d/63.feature b/changelog.d/63.feature new file mode 100644
index 0000000000..b45f38fa94 --- /dev/null +++ b/changelog.d/63.feature
@@ -0,0 +1 @@ +Make AccessRules use the public rooms directory instead of checking a room's join rules on rule change. diff --git a/changelog.d/64.bugfix b/changelog.d/64.bugfix new file mode 100644
index 0000000000..60c077af94 --- /dev/null +++ b/changelog.d/64.bugfix
@@ -0,0 +1 @@ +Ensure a `RoomAccessRules` test doesn't accidentally modify a room's access rule and then test that room assuming its access rule has not changed. diff --git a/changelog.d/65.bugfix b/changelog.d/65.bugfix new file mode 100644
index 0000000000..71b498cbc8 --- /dev/null +++ b/changelog.d/65.bugfix
@@ -0,0 +1 @@ +Fix `nextLink` parameters being checked on validation endpoints even if they weren't provided by the client. \ No newline at end of file diff --git a/changelog.d/66.bugfix b/changelog.d/66.bugfix new file mode 100644
index 0000000000..9547cfeddd --- /dev/null +++ b/changelog.d/66.bugfix
@@ -0,0 +1 @@ +Create a mapping between user ID and threepid when binding via the internal Sydent bind API. \ No newline at end of file diff --git a/changelog.d/7124.bugfix b/changelog.d/7124.bugfix new file mode 100644
index 0000000000..8fd177780d --- /dev/null +++ b/changelog.d/7124.bugfix
@@ -0,0 +1 @@ +Fix a bug in the media repository where remote thumbnails with the same size but different crop methods would overwrite each other. Contributed by @deepbluev7. diff --git a/changelog.d/7796.bugfix b/changelog.d/7796.bugfix new file mode 100644
index 0000000000..65e5eb42a2 --- /dev/null +++ b/changelog.d/7796.bugfix
@@ -0,0 +1 @@ +Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. diff --git a/changelog.d/8004.feature b/changelog.d/8004.feature new file mode 100644
index 0000000000..a91b75e0e0 --- /dev/null +++ b/changelog.d/8004.feature
@@ -0,0 +1 @@ +Require the user to confirm that their password should be reset after clicking the email confirmation link. \ No newline at end of file diff --git a/changelog.d/8216.misc b/changelog.d/8216.misc new file mode 100644
index 0000000000..b38911b0e5 --- /dev/null +++ b/changelog.d/8216.misc
@@ -0,0 +1 @@ +Simplify the distributor code to avoid unnecessary work. diff --git a/changelog.d/8230.misc b/changelog.d/8230.misc new file mode 100644
index 0000000000..bf0ba76730 --- /dev/null +++ b/changelog.d/8230.misc
@@ -0,0 +1 @@ +Track the latest event for every destination and room for catch-up after federation outage. diff --git a/changelog.d/8236.bugfix b/changelog.d/8236.bugfix new file mode 100644
index 0000000000..6f04871015 --- /dev/null +++ b/changelog.d/8236.bugfix
@@ -0,0 +1 @@ +Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. diff --git a/changelog.d/8243.misc b/changelog.d/8243.misc new file mode 100644
index 0000000000..f7375d32d3 --- /dev/null +++ b/changelog.d/8243.misc
@@ -0,0 +1 @@ +Remove the 'populate_stats_process_rooms_2' background job and restore functionality to 'populate_stats_process_rooms'. \ No newline at end of file diff --git a/changelog.d/8247.misc b/changelog.d/8247.misc new file mode 100644
index 0000000000..3c27803be4 --- /dev/null +++ b/changelog.d/8247.misc
@@ -0,0 +1 @@ +Track the `stream_ordering` of the last successfully-sent event to every destination, so we can use this information to 'catch up' a remote server after an outage. diff --git a/changelog.d/8250.misc b/changelog.d/8250.misc new file mode 100644
index 0000000000..b6896a9300 --- /dev/null +++ b/changelog.d/8250.misc
@@ -0,0 +1 @@ +Clean up type hints for `PaginationConfig`. diff --git a/changelog.d/8256.misc b/changelog.d/8256.misc new file mode 100644
index 0000000000..bf0ba76730 --- /dev/null +++ b/changelog.d/8256.misc
@@ -0,0 +1 @@ +Track the latest event for every destination and room for catch-up after federation outage. diff --git a/changelog.d/8257.misc b/changelog.d/8257.misc new file mode 100644
index 0000000000..47ac583eb4 --- /dev/null +++ b/changelog.d/8257.misc
@@ -0,0 +1 @@ +Fix non-user visible bug in implementation of `MultiWriterIdGenerator.get_current_token_for_writer`. diff --git a/changelog.d/8258.misc b/changelog.d/8258.misc new file mode 100644
index 0000000000..3c27803be4 --- /dev/null +++ b/changelog.d/8258.misc
@@ -0,0 +1 @@ +Track the `stream_ordering` of the last successfully-sent event to every destination, so we can use this information to 'catch up' a remote server after an outage. diff --git a/changelog.d/8259.misc b/changelog.d/8259.misc new file mode 100644
index 0000000000..a26779a664 --- /dev/null +++ b/changelog.d/8259.misc
@@ -0,0 +1 @@ +Switch to the JSON implementation from the standard library. diff --git a/changelog.d/8260.misc b/changelog.d/8260.misc new file mode 100644
index 0000000000..164eea8b59 --- /dev/null +++ b/changelog.d/8260.misc
@@ -0,0 +1 @@ +Add type hints to `synapse.util.async_helpers`. diff --git a/changelog.d/8261.misc b/changelog.d/8261.misc new file mode 100644
index 0000000000..bc91e9375c --- /dev/null +++ b/changelog.d/8261.misc
@@ -0,0 +1 @@ +Simplify tests that mock asynchronous functions. diff --git a/changelog.d/8262.bugfix b/changelog.d/8262.bugfix new file mode 100644
index 0000000000..2b84927de3 --- /dev/null +++ b/changelog.d/8262.bugfix
@@ -0,0 +1 @@ +Upgrade canonicaljson to version 1.4.0 to fix an unicode encoding issue. diff --git a/changelog.d/8265.bugfix b/changelog.d/8265.bugfix new file mode 100644
index 0000000000..981a836d21 --- /dev/null +++ b/changelog.d/8265.bugfix
@@ -0,0 +1 @@ +Fix logstanding bug which could lead to incomplete database upgrades on SQLite. diff --git a/changelog.d/8268.bugfix b/changelog.d/8268.bugfix new file mode 100644
index 0000000000..4b15a60253 --- /dev/null +++ b/changelog.d/8268.bugfix
@@ -0,0 +1 @@ +Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. diff --git a/changelog.d/8275.feature b/changelog.d/8275.feature new file mode 100644
index 0000000000..17549c3df3 --- /dev/null +++ b/changelog.d/8275.feature
@@ -0,0 +1 @@ +Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/changelog.d/8278.bugfix b/changelog.d/8278.bugfix new file mode 100644
index 0000000000..50e40ca2a9 --- /dev/null +++ b/changelog.d/8278.bugfix
@@ -0,0 +1 @@ +Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. diff --git a/changelog.d/8279.misc b/changelog.d/8279.misc new file mode 100644
index 0000000000..99f669001f --- /dev/null +++ b/changelog.d/8279.misc
@@ -0,0 +1 @@ +Add type hints to `StreamToken` and `RoomStreamToken` classes. diff --git a/changelog.d/8282.misc b/changelog.d/8282.misc new file mode 100644
index 0000000000..b6896a9300 --- /dev/null +++ b/changelog.d/8282.misc
@@ -0,0 +1 @@ +Clean up type hints for `PaginationConfig`. diff --git a/changelog.d/8285.misc b/changelog.d/8285.misc new file mode 100644
index 0000000000..4646664ba1 --- /dev/null +++ b/changelog.d/8285.misc
@@ -0,0 +1 @@ +Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. \ No newline at end of file diff --git a/changelog.d/8287.bugfix b/changelog.d/8287.bugfix new file mode 100644
index 0000000000..839781aa07 --- /dev/null +++ b/changelog.d/8287.bugfix
@@ -0,0 +1 @@ +Fix edge case where push could get delayed for a user until a later event was pushed. diff --git a/changelog.d/8479.feature b/changelog.d/8479.feature new file mode 100644
index 0000000000..11adeec8a9 --- /dev/null +++ b/changelog.d/8479.feature
@@ -0,0 +1 @@ +Add the ability to send non-membership events into a room via the `ModuleApi`. \ No newline at end of file diff --git a/changelog.d/9.misc b/changelog.d/9.misc new file mode 100644
index 0000000000..24fd12c978 --- /dev/null +++ b/changelog.d/9.misc
@@ -0,0 +1 @@ +Add SyTest to the BuildKite CI. diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md deleted file mode 100644
index 5d42b3464f..0000000000 --- a/contrib/systemd/README.md +++ /dev/null
@@ -1,17 +0,0 @@ -# Setup Synapse with Systemd -This is a setup for managing synapse with a user contributed systemd unit -file. It provides a `matrix-synapse` systemd unit file that should be tailored -to accommodate your installation in accordance with the installation -instructions provided in [installation instructions](../../INSTALL.md). - -## Setup -1. Under the service section, ensure the `User` variable matches which user -you installed synapse under and wish to run it as. -2. Under the service section, ensure the `WorkingDirectory` variable matches -where you have installed synapse. -3. Under the service section, ensure the `ExecStart` variable matches the -appropriate locations of your installation. -4. Copy the `matrix-synapse.service` to `/etc/systemd/system/` -5. Start Synapse: `sudo systemctl start matrix-synapse` -6. Verify Synapse is running: `sudo systemctl status matrix-synapse` -7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse` diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 3528d9e11f..61b4bb75eb 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml
@@ -340,6 +340,74 @@ limit_remote_rooms: # #allow_per_room_profiles: false +# Whether to show the users on this homeserver in the user directory. Defaults to +# 'true'. +# +#show_users_in_user_directory: false + +# Message retention policy at the server level. +# +# Room admins and mods can define a retention period for their rooms using the +# 'm.room.retention' state event, and server admins can cap this period by setting +# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. +# +# If this feature is enabled, Synapse will regularly look for and purge events +# which are older than the room's maximum retention period. Synapse will also +# filter events received over federation so that events that should have been +# purged are ignored and not stored again. +# +retention: + # The message retention policies feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # Default retention policy. If set, Synapse will apply it to rooms that lack the + # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't + # matter much because Synapse doesn't take it into account yet. + # + #default_policy: + # min_lifetime: 1d + # max_lifetime: 1y + + # Retention policy limits. If set, a user won't be able to send a + # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' + # that's not within this range. This is especially useful in closed federations, + # in which server admins can make sure every federating server applies the same + # rules. + # + #allowed_lifetime_min: 1d + #allowed_lifetime_max: 1y + + # Server admins can define the settings of the background jobs purging the + # events which lifetime has expired under the 'purge_jobs' section. + # + # If no configuration is provided, a single job will be set up to delete expired + # events in every room daily. + # + # Each job's configuration defines which range of message lifetimes the job + # takes care of. For example, if 'shortest_max_lifetime' is '2d' and + # 'longest_max_lifetime' is '3d', the job will handle purging expired events in + # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and + # lower than or equal to 3 days. Both the minimum and the maximum value of a + # range are optional, e.g. a job with no 'shortest_max_lifetime' and a + # 'longest_max_lifetime' of '3d' will handle every room with a retention policy + # which 'max_lifetime' is lower than or equal to three days. + # + # The rationale for this per-job configuration is that some rooms might have a + # retention policy with a low 'max_lifetime', where history needs to be purged + # of outdated messages on a very frequent basis (e.g. every 5min), but not want + # that purge to be performed by a job that's iterating over every room it knows, + # which would be quite heavy on the server. + # + #purge_jobs: + # - shortest_max_lifetime: 1d + # longest_max_lifetime: 3d + # interval: 5m: + # - shortest_max_lifetime: 3d + # longest_max_lifetime: 1y + # interval: 24h + # How long to keep redacted events in unredacted form in the database. After # this period redacted events get replaced with their redacted form in the DB. # @@ -432,6 +500,24 @@ retention: # #request_token_inhibit_3pid_errors: true +# A list of domains that the domain portion of 'next_link' parameters +# must match. 
+# +# This parameter is optionally provided by clients while requesting +# validation of an email or phone number, and maps to a link that +# users will be automatically redirected to after validation +# succeeds. Clients can make use of this parameter to aid the validation +# process. +# +# The whitelist is applied whether the homeserver or an +# identity server is handling validation. +# +# The default value is no whitelist functionality; all domains are +# allowed. Setting this value to an empty list will instead disallow +# all domains. +# +#next_link_domain_whitelist: ["matrix.org"] + ## TLS ## @@ -749,6 +835,8 @@ log_config: "CONFDIR/SERVERNAME.log.config" # - one for login that ratelimits login requests based on the account the # client is attempting to log into, based on the amount of failed login # attempts for this account. +# - one that ratelimits third-party invite requests based on the account +# that's making the requests. # - one for ratelimiting redactions by room admins. If this is not explicitly # set then it uses the same ratelimiting as per rc_message. This is useful # to allow room admins to deal with abuse quickly. @@ -778,6 +866,10 @@ log_config: "CONFDIR/SERVERNAME.log.config" # per_second: 0.17 # burst_count: 3 # +#rc_third_party_invite: +# per_second: 0.2 +# burst_count: 10 +# #rc_admin_redaction: # per_second: 1 # burst_count: 50 @@ -852,6 +944,30 @@ media_store_path: "DATADIR/media_store" # #max_upload_size: 10M +# The largest allowed size for a user avatar. If not defined, no +# restriction will be imposed. +# +# Note that this only applies when an avatar is changed globally. +# Per-room avatar changes are not affected. See allow_per_room_profiles +# for disabling that functionality. +# +# Note that user avatar changes will not work if this is set without +# using Synapse's local media repo. +# +#max_avatar_size: 10M + +# Allow mimetypes for a user avatar. If not defined, no restriction will +# be imposed. +# +# Note that this only applies when an avatar is changed globally. +# Per-room avatar changes are not affected. See allow_per_room_profiles +# for disabling that functionality. +# +# Note that user avatar changes will not work if this is set without +# using Synapse's local media repo. +# +#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # Maximum number of pixels that will be thumbnailed # #max_image_pixels: 32M @@ -1136,9 +1252,32 @@ account_validity: # #disable_msisdn_registration: true +# Derive the user's matrix ID from a type of 3PID used when registering. +# This overrides any matrix ID the user proposes when calling /register +# The 3PID type should be present in registrations_require_3pid to avoid +# users failing to register if they don't specify the right kind of 3pid. +# +#register_mxid_from_3pid: email + +# Uncomment to set the display name of new users to their email address, +# rather than using the default heuristic. +# +#register_just_use_email_for_display_name: true + # Mandate that users are only allowed to associate certain formats of # 3PIDs with accounts on this server. # +# Use an Identity Server to establish which 3PIDs are allowed to register? +# Overrides allowed_local_3pids below.
+# +#check_is_for_allowed_local_3pids: matrix.org +# +# If you are using an IS you can also check whether that IS registers +# pending invites for the given 3PID (and then allow it to sign up on +# the platform): +# +#allow_invited_3pids: false +# #allowed_local_3pids: # - medium: email # pattern: '.*@matrix\.org' @@ -1147,6 +1286,11 @@ account_validity: # - medium: msisdn # pattern: '\+44' +# If true, stop users from trying to change the 3PIDs associated with +# their accounts. +# +#disable_3pid_changes: false + # Enable 3PIDs lookup requests to identity servers from this server. # #enable_3pid_lookup: true @@ -1178,6 +1322,30 @@ account_validity: # #default_identity_server: https://matrix.org +# If enabled, user IDs, display names and avatar URLs will be replicated +# to this server whenever they change. +# This is an experimental API currently implemented by sydent to support +# cross-homeserver user directories. +# +#replicate_user_profiles_to: example.com + +# If specified, attempt to replay registrations, profile changes & 3pid +# bindings on the given target homeserver via the AS API. The HS is authed +# via a given AS token. +# +#shadow_server: +# hs_url: https://shadow.example.com +# hs: shadow.example.com +# as_token: 12u394refgbdhivsia + +# If enabled, don't let users set their own display names/avatars +# other than for the very first time (unless they are a server admin). +# Useful when provisioning users based on the contents of a 3rd party +# directory and to avoid ambiguities. +# +#disable_set_displayname: false +#disable_set_avatar_url: false + # Handle threepid (email/phone etc) registration and password resets through a set of # *trusted* identity servers. Note that this allows the configured identity server to # reset passwords for accounts! @@ -1304,6 +1472,31 @@ account_threepid_delegates: # #auto_join_rooms_for_guests: false +# Rewrite identity server URLs with a map from one URL to another. Applies to URLs +# provided by clients (which have https:// prepended) and those specified +# in `account_threepid_delegates`. URLs should not feature a trailing slash. +# +#rewrite_identity_server_urls: +# "https://somewhere.example.com": "https://somewhereelse.example.com" + +# When a user registers an account with an email address, it can be useful to +# bind that email address to their mxid on an identity server. Typically, this +# requires the user to validate their email address with the identity server. +# However if Synapse itself is handling email validation on registration, the +# user ends up needing to validate their email twice, which leads to poor UX. +# +# It is possible to force Sydent, one identity server implementation, to bind +# threepids using its internal, unauthenticated bind API: +# https://github.com/matrix-org/sydent/#internal-bind-and-unbind-api +# +# Configure the address of a Sydent server here to have Synapse attempt +# to automatically bind users' emails following registration. The +# internal bind API must be reachable from Synapse, but should NOT be +# exposed to any third party, as it allows the creation of bindings +# without validation. 
+# +#bind_new_user_emails_to_sydent: https://example.com:8091 + ## Metrics ### @@ -2021,9 +2214,13 @@ email: # * The contents of password reset emails sent by the homeserver: # 'password_reset.html' and 'password_reset.txt' # - # * HTML pages for success and failure that a user will see when they follow - # the link in the password reset email: 'password_reset_success.html' and - # 'password_reset_failure.html' + # * An HTML page that a user will see when they follow the link in the password + # reset email. The user will be asked to confirm the action before their + # password is reset: 'password_reset_confirmation.html' + # + # * HTML pages for success and failure that a user will see when they confirm + # the password reset flow using the page above: 'password_reset_success.html' + # and 'password_reset_failure.html' # # * The contents of address verification emails sent during registration: # 'registration.html' and 'registration.txt' @@ -2217,6 +2414,11 @@ spam_checker: #user_directory: # enabled: true # search_all_users: false +# +# # If this is set, user search will be delegated to this ID server instead +# # of synapse performing the search itself. +# # This is an experimental API. +# defer_to_id_server: https://id.example.com # User Consent configuration diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py
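For illustration only (this sketch is not part of the diff): the retention documentation above packs several rules into prose. The Python below restates them, assuming hypothetical helper names and millisecond durations: an event is purgeable once it is older than the room's effective 'max_lifetime', and a purge job only covers rooms whose 'max_lifetime' is strictly above its 'shortest_max_lifetime' and at most its 'longest_max_lifetime'.

# Sketch of the retention semantics documented above; helper names are made up.
from typing import Optional

def event_is_expired(now_ms: int, event_ts_ms: int, max_lifetime_ms: int) -> bool:
    # An event is eligible for purging once it is older than the room's
    # effective 'max_lifetime'.
    return now_ms - event_ts_ms > max_lifetime_ms

def job_handles_room(
    room_max_lifetime_ms: int,
    shortest_max_lifetime_ms: Optional[int],
    longest_max_lifetime_ms: Optional[int],
) -> bool:
    # A purge job covers rooms whose 'max_lifetime' is higher than
    # 'shortest_max_lifetime' and lower than or equal to 'longest_max_lifetime';
    # either bound may be omitted (None).
    if shortest_max_lifetime_ms is not None and room_max_lifetime_ms <= shortest_max_lifetime_ms:
        return False
    if longest_max_lifetime_ms is not None and room_max_lifetime_ms > longest_max_lifetime_ms:
        return False
    return True

# A room with a 2.5 day max_lifetime is handled by the (1d, 3d) job from the
# sample config, not by the (3d, 1y) one.
DAY_MS = 24 * 60 * 60 * 1000
assert job_handles_room(int(2.5 * DAY_MS), 1 * DAY_MS, 3 * DAY_MS)
assert not job_handles_room(int(2.5 * DAY_MS), 3 * DAY_MS, 365 * DAY_MS)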
index ca4b879526..5c5a115ca9 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py
@@ -12,8 +12,8 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys import os +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -191,11 +191,11 @@ htmlhelp_basename = "Synapsedoc" latex_elements = { # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', + # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', + # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. - #'preamble': '', + # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples diff --git a/mypy.ini b/mypy.ini
index 7764f17856..460392377e 100644 --- a/mypy.ini +++ b/mypy.ini
@@ -34,7 +34,7 @@ files = synapse/http/federation/well_known_resolver.py, synapse/http/server.py, synapse/http/site.py, - synapse/logging/, + synapse/logging, synapse/metrics, synapse/module_api, synapse/notifier.py, @@ -54,6 +54,7 @@ files = synapse/storage/util, synapse/streams, synapse/types.py, + synapse/util/async_helpers.py, synapse/util/caches/descriptors.py, synapse/util/caches/stream_change_cache.py, synapse/util/metrics.py, diff --git a/res/templates-dinsic/mail-Vector.css b/res/templates-dinsic/mail-Vector.css new file mode 100644
index 0000000000..6a3e36eda1 --- /dev/null +++ b/res/templates-dinsic/mail-Vector.css
@@ -0,0 +1,7 @@ +.header { + border-bottom: 4px solid #e4f7ed ! important; +} + +.notif_link a, .footer a { + color: #76CFA6 ! important; +} diff --git a/res/templates-dinsic/mail.css b/res/templates-dinsic/mail.css new file mode 100644
index 0000000000..5ab3e1b06d --- /dev/null +++ b/res/templates-dinsic/mail.css
@@ -0,0 +1,156 @@ +body { + margin: 0px; +} + +pre, code { + word-break: break-word; + white-space: pre-wrap; +} + +#page { + font-family: 'Open Sans', Helvetica, Arial, Sans-Serif; + font-color: #454545; + font-size: 12pt; + width: 100%; + padding: 20px; +} + +#inner { + width: 640px; +} + +.header { + width: 100%; + height: 87px; + color: #454545; + border-bottom: 4px solid #e5e5e5; +} + +.logo { + text-align: right; + margin-left: 20px; +} + +.salutation { + padding-top: 10px; + font-weight: bold; +} + +.summarytext { +} + +.room { + width: 100%; + color: #454545; + border-bottom: 1px solid #e5e5e5; +} + +.room_header td { + padding-top: 38px; + padding-bottom: 10px; + border-bottom: 1px solid #e5e5e5; +} + +.room_name { + vertical-align: middle; + font-size: 18px; + font-weight: bold; +} + +.room_header h2 { + margin-top: 0px; + margin-left: 75px; + font-size: 20px; +} + +.room_avatar { + width: 56px; + line-height: 0px; + text-align: center; + vertical-align: middle; +} + +.room_avatar img { + width: 48px; + height: 48px; + object-fit: cover; + border-radius: 24px; +} + +.notif { + border-bottom: 1px solid #e5e5e5; + margin-top: 16px; + padding-bottom: 16px; +} + +.historical_message .sender_avatar { + opacity: 0.3; +} + +/* spell out opacity and historical_message class names for Outlook aka Word */ +.historical_message .sender_name { + color: #e3e3e3; +} + +.historical_message .message_time { + color: #e3e3e3; +} + +.historical_message .message_body { + color: #c7c7c7; +} + +.historical_message td, +.message td { + padding-top: 10px; +} + +.sender_avatar { + width: 56px; + text-align: center; + vertical-align: top; +} + +.sender_avatar img { + margin-top: -2px; + width: 32px; + height: 32px; + border-radius: 16px; +} + +.sender_name { + display: inline; + font-size: 13px; + color: #a2a2a2; +} + +.message_time { + text-align: right; + width: 100px; + font-size: 11px; + color: #a2a2a2; +} + +.message_body { +} + +.notif_link td { + padding-top: 10px; + padding-bottom: 10px; + font-weight: bold; +} + +.notif_link a, .footer a { + color: #454545; + text-decoration: none; +} + +.debug { + font-size: 10px; + color: #888; +} + +.footer { + margin-top: 20px; + text-align: center; +} \ No newline at end of file diff --git a/res/templates-dinsic/notif.html b/res/templates-dinsic/notif.html new file mode 100644
index 0000000000..bcdfeea9da --- /dev/null +++ b/res/templates-dinsic/notif.html
@@ -0,0 +1,45 @@ +{% for message in notif.messages %} + <tr class="{{ "historical_message" if message.is_historical else "message" }}"> + <td class="sender_avatar"> + {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + {% if message.sender_avatar_url %} + <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" /> + {% else %} + {% if message.sender_hash % 3 == 0 %} + <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" /> + {% elif message.sender_hash % 3 == 1 %} + <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" /> + {% else %} + <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" /> + {% endif %} + {% endif %} + {% endif %} + </td> + <td class="message_contents"> + {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %} + <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div> + {% endif %} + <div class="message_body"> + {% if message.msgtype == "m.text" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.emote" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.notice" %} + {{ message.body_text_html }} + {% elif message.msgtype == "m.image" %} + <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" /> + {% elif message.msgtype == "m.file" %} + <span class="filename">{{ message.body_text_plain }}</span> + {% endif %} + </div> + </td> + <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td> + </tr> +{% endfor %} +<tr class="notif_link"> + <td></td> + <td> + <a href="{{ notif.link }}">Voir {{ room.title }}</a> + </td> + <td></td> +</tr> diff --git a/res/templates-dinsic/notif.txt b/res/templates-dinsic/notif.txt new file mode 100644
index 0000000000..3dff1bb570 --- /dev/null +++ b/res/templates-dinsic/notif.txt
@@ -0,0 +1,16 @@ +{% for message in notif.messages %} +{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{% if message.msgtype == "m.text" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.emote" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.notice" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.image" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.file" %} +{{ message.body_text_plain }} +{% endif %} +{% endfor %} + +Voir {{ room.title }} à {{ notif.link }} diff --git a/res/templates-dinsic/notif_mail.html b/res/templates-dinsic/notif_mail.html new file mode 100644
index 0000000000..1e1efa74b2 --- /dev/null +++ b/res/templates-dinsic/notif_mail.html
@@ -0,0 +1,55 @@ +<!doctype html> +<html lang="en"> + <head> + <style type="text/css"> + {% include 'mail.css' without context %} + {% include "mail-%s.css" % app_name ignore missing without context %} + </style> + </head> + <body> + <table id="page"> + <tr> + <td> </td> + <td id="inner"> + <table class="header"> + <tr> + <td> + <div class="salutation">Bonjour {{ user_display_name }},</div> + <div class="summarytext">{{ summary_text }}</div> + </td> + <td class="logo"> + {% if app_name == "Riot" %} + <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/> + {% elif app_name == "Vector" %} + <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/> + {% else %} + <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/> + {% endif %} + </td> + </tr> + </table> + {% for room in rooms %} + {% include 'room.html' with context %} + {% endfor %} + <div class="footer"> + <a href="{{ unsubscribe_link }}">Se désinscrire</a> + <br/> + <br/> + <div class="debug"> + Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because + an event was received at {{ reason.received_at|format_ts("%c") }} + which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago, + {% if reason.last_sent_ts %} + and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }}, + which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago. + {% else %} + and we don't have a last time we sent a mail for this room. + {% endif %} + </div> + </div> + </td> + <td> </td> + </tr> + </table> + </body> +</html> diff --git a/res/templates-dinsic/notif_mail.txt b/res/templates-dinsic/notif_mail.txt new file mode 100644
index 0000000000..fae877426f --- /dev/null +++ b/res/templates-dinsic/notif_mail.txt
@@ -0,0 +1,10 @@ +Bonjour {{ user_display_name }}, + +{{ summary_text }} + +{% for room in rooms %} +{% include 'room.txt' with context %} +{% endfor %} + +Vous pouvez désactiver ces notifications en cliquant ici {{ unsubscribe_link }} + diff --git a/res/templates-dinsic/room.html b/res/templates-dinsic/room.html new file mode 100644
index 0000000000..0487b1b11c --- /dev/null +++ b/res/templates-dinsic/room.html
@@ -0,0 +1,33 @@ +<table class="room"> + <tr class="room_header"> + <td class="room_avatar"> + {% if room.avatar_url %} + <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" /> + {% else %} + {% if room.hash % 3 == 0 %} + <img alt="" src="https://vector.im/beta/img/76cfa6.png" /> + {% elif room.hash % 3 == 1 %} + <img alt="" src="https://vector.im/beta/img/50e2c2.png" /> + {% else %} + <img alt="" src="https://vector.im/beta/img/f4c371.png" /> + {% endif %} + {% endif %} + </td> + <td class="room_name" colspan="2"> + {{ room.title }} + </td> + </tr> + {% if room.invite %} + <tr> + <td></td> + <td> + <a href="{{ room.link }}">Rejoindre la conversation.</a> + </td> + <td></td> + </tr> + {% else %} + {% for notif in room.notifs %} + {% include 'notif.html' with context %} + {% endfor %} + {% endif %} +</table> diff --git a/res/templates-dinsic/room.txt b/res/templates-dinsic/room.txt new file mode 100644
index 0000000000..dd36d01d21 --- /dev/null +++ b/res/templates-dinsic/room.txt
@@ -0,0 +1,9 @@ +{{ room.title }} + +{% if room.invite %} +   Vous avez été invité, rejoignez la conversation en cliquant sur le lien suivant {{ room.link }} +{% else %} + {% for notif in room.notifs %} + {% include 'notif.txt' with context %} + {% endfor %} +{% endif %} diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
index 448cadb829..d742c522b5 100755 --- a/scripts-dev/check-newsfragment +++ b/scripts-dev/check-newsfragment
@@ -7,9 +7,9 @@ echo -e "+++ \033[32mChecking newsfragment\033[m" set -e -# make sure that origin/develop is up to date -git remote set-branches --add origin develop -git fetch -q origin develop +# make sure that origin/dinsic is up to date +git remote set-branches --add origin dinsic +git fetch -q origin dinsic pr="$BUILDKITE_PULL_REQUEST" diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index a34bdf1830..604e0fd662 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db
@@ -45,6 +45,7 @@ from synapse.storage.databases.main.events_bg_updates import ( from synapse.storage.databases.main.media_repository import ( MediaRepositoryBackgroundUpdateStore, ) +from synapse.storage.databases.main.profile import ProfileStore from synapse.storage.databases.main.registration import ( RegistrationBackgroundUpdateStore, find_max_generated_user_id_localpart, @@ -163,6 +164,7 @@ class Store( DeviceBackgroundUpdateStore, EventsBackgroundUpdatesStore, MediaRepositoryBackgroundUpdateStore, + ProfileStore, RegistrationBackgroundUpdateStore, RoomBackgroundUpdateStore, RoomMemberBackgroundUpdateStore, diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 75388643ee..2cd3383f71 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py
@@ -190,7 +190,7 @@ class Auth: access_token = self.get_access_token_from_request(request) - user_id, app_service = await self._get_appservice_user_id(request) + user_id, app_service = self._get_appservice_user_id(request) if user_id: request.authenticated_entity = user_id opentracing.set_tag("authenticated_entity", user_id) @@ -263,10 +263,11 @@ class Auth: except KeyError: raise MissingClientTokenError() - async def _get_appservice_user_id(self, request): + def _get_appservice_user_id(self, request): app_service = self.store.get_app_service_by_token( self.get_access_token_from_request(request) ) + if app_service is None: return None, None @@ -284,8 +285,12 @@ class Auth: if not app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") - if not (await self.store.get_user_by_id(user_id)): - raise AuthError(403, "Application service has not registered this user") + # Let ASes manipulate nonexistent users (e.g. to shadow-register them) + # if not (await self.store.get_user_by_id(user_id)): + # raise AuthError( + # 403, + # "Application service has not registered this user" + # ) return user_id, app_service async def get_user_by_access_token( diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 94a9e58eae..083f664e3e 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py
@@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 2a2c9e6f13..bb33345be6 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py
@@ -15,10 +15,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import json from typing import List import jsonschema -from canonicaljson import json from jsonschema import FormatChecker from synapse.api.constants import EventContentFields diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index bbfccf955e..6379c86dde 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py
@@ -21,6 +21,7 @@ from urllib.parse import urlencode from synapse.config import ConfigError +SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client" CLIENT_API_PREFIX = "/_matrix/client" FEDERATION_PREFIX = "/_matrix/federation" FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1" diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index b6c9085670..7d309b1bb0 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py
@@ -14,13 +14,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse +import json import logging import os import sys import tempfile -from canonicaljson import json - from twisted.internet import defer, task import synapse diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index f985810e88..20af05a361 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py
@@ -157,7 +157,7 @@ class PresenceStatusStubServlet(RestServlet): async def on_GET(self, request, user_id): await self.auth.get_user_by_req(request) - return 200, {"presence": "offline"} + return 200, {"presence": "offline", "user_id": user_id} async def on_PUT(self, request, user_id): await self.auth.get_user_by_req(request) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 6014adc850..b08319ca77 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py
@@ -48,6 +48,7 @@ from synapse.api.urls import ( from synapse.app import _base from synapse.app._base import listen_ssl, listen_tcp, quit_with_error from synapse.config._base import ConfigError +from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig from synapse.config.server import ListenerConfig from synapse.federation.transport.server import TransportLayerServer @@ -209,6 +210,15 @@ class SynapseHomeServer(HomeServer): resources["/_matrix/saml2"] = SAML2Resource(self) + if self.get_config().threepid_behaviour_email == ThreepidBehaviour.LOCAL: + from synapse.rest.synapse.client.password_reset import ( + PasswordResetSubmitTokenResource, + ) + + resources[ + "/_synapse/client/password_reset/email/submit_token" + ] = PasswordResetSubmitTokenResource(self) + if name == "consent": from synapse.rest.consent.consent_resource import ConsentResource diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index ad5ab6ad62..d8b6a7ff1c 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py
@@ -22,6 +22,7 @@ import time import urllib.parse from collections import OrderedDict from hashlib import sha256 +from io import open as io_open from textwrap import dedent from typing import Any, Callable, List, MutableMapping, Optional @@ -190,7 +191,7 @@ class Config: @classmethod def read_file(cls, file_path, config_name): cls.check_file(file_path, config_name) - with open(file_path) as file_stream: + with io_open(file_path, encoding="utf-8") as file_stream: return file_stream.read() def read_templates( diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 7a796996c0..72b42bfd62 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py
@@ -228,6 +228,7 @@ class EmailConfig(Config): self.email_registration_template_text, self.email_add_threepid_template_html, self.email_add_threepid_template_text, + self.email_password_reset_template_confirmation_html, self.email_password_reset_template_failure_html, self.email_registration_template_failure_html, self.email_add_threepid_template_failure_html, @@ -242,6 +243,7 @@ class EmailConfig(Config): registration_template_text, add_threepid_template_html, add_threepid_template_text, + "password_reset_confirmation.html", password_reset_template_failure_html, registration_template_failure_html, add_threepid_template_failure_html, @@ -404,9 +406,13 @@ class EmailConfig(Config): # * The contents of password reset emails sent by the homeserver: # 'password_reset.html' and 'password_reset.txt' # - # * HTML pages for success and failure that a user will see when they follow - # the link in the password reset email: 'password_reset_success.html' and - # 'password_reset_failure.html' + # * An HTML page that a user will see when they follow the link in the password + # reset email. The user will be asked to confirm the action before their + # password is reset: 'password_reset_confirmation.html' + # + # * HTML pages for success and failure that a user will see when they confirm + # the password reset flow using the page above: 'password_reset_success.html' + # and 'password_reset_failure.html' # # * The contents of address verification emails sent during registration: # 'registration.html' and 'registration.txt' diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index c96e6ef62a..13d6f6a3ea 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py
@@ -17,6 +17,7 @@ import logging import logging.config import os import sys +import threading from string import Template import yaml @@ -25,6 +26,7 @@ from twisted.logger import ( ILogObserver, LogBeginner, STDLibLogObserver, + eventAsText, globalLogBeginner, ) @@ -216,8 +218,9 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): # system. observer = STDLibLogObserver() - def _log(event): + threadlocal = threading.local() + def _log(event): if "log_text" in event: if event["log_text"].startswith("DNSDatagramProtocol starting on "): return @@ -228,7 +231,25 @@ def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner): if event["log_text"].startswith("Timing out client"): return - return observer(event) + # this is a workaround to make sure we don't get stack overflows when the + # logging system raises an error which is written to stderr which is redirected + # to the logging system, etc. + if getattr(threadlocal, "active", False): + # write the text of the event, if any, to the *real* stderr (which may + # be redirected to /dev/null, but there's not much we can do) + try: + event_text = eventAsText(event) + print("logging during logging: %s" % event_text, file=sys.__stderr__) + except Exception: + # gah. + pass + return + + try: + threadlocal.active = True + return observer(event) + finally: + threadlocal.active = False logBeginner.beginLoggingTo([_log], redirectStandardIO=not config.no_redirect_stdio) if not config.no_redirect_stdio: diff --git a/synapse/config/password.py b/synapse/config/password.py
index 9c0ea8c30a..6b2dae78b0 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py
@@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 14b8836197..4fca5b6d96 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py
@@ -76,6 +76,9 @@ class RatelimitConfig(Config): ) self.rc_registration = RateLimitConfig(config.get("rc_registration", {})) + self.rc_third_party_invite = RateLimitConfig( + config.get("rc_third_party_invite", {}) + ) rc_login_config = config.get("rc_login", {}) self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {})) @@ -124,6 +127,8 @@ class RatelimitConfig(Config): # - one for login that ratelimits login requests based on the account the # client is attempting to log into, based on the amount of failed login # attempts for this account. + # - one that ratelimits third-party invite requests based on the account + # that's making the requests. # - one for ratelimiting redactions by room admins. If this is not explicitly # set then it uses the same ratelimiting as per rc_message. This is useful # to allow room admins to deal with abuse quickly. @@ -153,6 +158,10 @@ class RatelimitConfig(Config): # per_second: 0.17 # burst_count: 3 # + #rc_third_party_invite: + # per_second: 0.2 + # burst_count: 10 + # #rc_admin_redaction: # per_second: 1 # burst_count: 50 diff --git a/synapse/config/registration.py b/synapse/config/registration.py
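For illustration only (this sketch is not part of the diff): every rc_* block, including the new rc_third_party_invite, is a pair of 'per_second' and 'burst_count' values. The generic token bucket below shows how those two numbers interact; it is not Synapse's Ratelimiter implementation.

# Generic token-bucket illustration of 'per_second' / 'burst_count' semantics.
import time

class TokenBucket:
    def __init__(self, per_second: float, burst_count: float):
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = burst_count
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill at 'per_second' tokens per second, capped at 'burst_count'.
        self.tokens = min(self.burst_count, self.tokens + (now - self.last) * self.per_second)
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False

# With per_second=0.2 and burst_count=10 (the commented-out rc_third_party_invite
# values), a client can send 10 invites at once, then roughly one every 5 seconds.
limiter = TokenBucket(per_second=0.2, burst_count=10)
print(sum(limiter.allow() for _ in range(12)))  # -> 10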
index a185655774..3c2e951a71 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py
@@ -100,8 +100,19 @@ class RegistrationConfig(Config): self.registrations_require_3pid = config.get("registrations_require_3pid", []) self.allowed_local_3pids = config.get("allowed_local_3pids", []) + self.check_is_for_allowed_local_3pids = config.get( + "check_is_for_allowed_local_3pids", None + ) + self.allow_invited_3pids = config.get("allow_invited_3pids", False) + + self.disable_3pid_changes = config.get("disable_3pid_changes", False) + self.enable_3pid_lookup = config.get("enable_3pid_lookup", True) self.registration_shared_secret = config.get("registration_shared_secret") + self.register_mxid_from_3pid = config.get("register_mxid_from_3pid") + self.register_just_use_email_for_display_name = config.get( + "register_just_use_email_for_display_name", False + ) self.bcrypt_rounds = config.get("bcrypt_rounds", 12) self.trusted_third_party_id_servers = config.get( @@ -109,7 +120,21 @@ class RegistrationConfig(Config): ) account_threepid_delegates = config.get("account_threepid_delegates") or {} self.account_threepid_delegate_email = account_threepid_delegates.get("email") + if ( + self.account_threepid_delegate_email + and not self.account_threepid_delegate_email.startswith("http") + ): + raise ConfigError( + "account_threepid_delegates.email must begin with http:// or https://" + ) self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") + if ( + self.account_threepid_delegate_msisdn + and not self.account_threepid_delegate_msisdn.startswith("http") + ): + raise ConfigError( + "account_threepid_delegates.msisdn must begin with http:// or https://" + ) if self.account_threepid_delegate_msisdn and not self.public_baseurl: raise ConfigError( "The configuration option `public_baseurl` is required if " @@ -178,6 +203,15 @@ class RegistrationConfig(Config): self.enable_set_avatar_url = config.get("enable_set_avatar_url", True) self.enable_3pid_changes = config.get("enable_3pid_changes", True) + self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", []) + if not isinstance(self.replicate_user_profiles_to, list): + self.replicate_user_profiles_to = [self.replicate_user_profiles_to] + + self.shadow_server = config.get("shadow_server", None) + self.rewrite_identity_server_urls = ( + config.get("rewrite_identity_server_urls") or {} + ) + self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False ) @@ -187,6 +221,23 @@ class RegistrationConfig(Config): session_lifetime = self.parse_duration(session_lifetime) self.session_lifetime = session_lifetime + self.bind_new_user_emails_to_sydent = config.get( + "bind_new_user_emails_to_sydent" + ) + + if self.bind_new_user_emails_to_sydent: + if not isinstance( + self.bind_new_user_emails_to_sydent, str + ) or not self.bind_new_user_emails_to_sydent.startswith("http"): + raise ConfigError( + "Option bind_new_user_emails_to_sydent has invalid value" + ) + + # Remove trailing slashes + self.bind_new_user_emails_to_sydent = self.bind_new_user_emails_to_sydent.strip( + "/" + ) + def generate_config_section(self, generate_secrets=False, **kwargs): if generate_secrets: registration_shared_secret = 'registration_shared_secret: "%s"' % ( @@ -291,9 +342,32 @@ class RegistrationConfig(Config): # #disable_msisdn_registration: true + # Derive the user's matrix ID from a type of 3PID used when registering. 
+ # This overrides any matrix ID the user proposes when calling /register + # The 3PID type should be present in registrations_require_3pid to avoid + # users failing to register if they don't specify the right kind of 3pid. + # + #register_mxid_from_3pid: email + + # Uncomment to set the display name of new users to their email address, + # rather than using the default heuristic. + # + #register_just_use_email_for_display_name: true + # Mandate that users are only allowed to associate certain formats of # 3PIDs with accounts on this server. # + # Use an Identity Server to establish which 3PIDs are allowed to register? + # Overrides allowed_local_3pids below. + # + #check_is_for_allowed_local_3pids: matrix.org + # + # If you are using an IS you can also check whether that IS registers + # pending invites for the given 3PID (and then allow it to sign up on + # the platform): + # + #allow_invited_3pids: false + # #allowed_local_3pids: # - medium: email # pattern: '.*@matrix\\.org' @@ -302,6 +376,11 @@ class RegistrationConfig(Config): # - medium: msisdn # pattern: '\\+44' + # If true, stop users from trying to change the 3PIDs associated with + # their accounts. + # + #disable_3pid_changes: false + # Enable 3PIDs lookup requests to identity servers from this server. # #enable_3pid_lookup: true @@ -333,6 +412,30 @@ class RegistrationConfig(Config): # #default_identity_server: https://matrix.org + # If enabled, user IDs, display names and avatar URLs will be replicated + # to this server whenever they change. + # This is an experimental API currently implemented by sydent to support + # cross-homeserver user directories. + # + #replicate_user_profiles_to: example.com + + # If specified, attempt to replay registrations, profile changes & 3pid + # bindings on the given target homeserver via the AS API. The HS is authed + # via a given AS token. + # + #shadow_server: + # hs_url: https://shadow.example.com + # hs: shadow.example.com + # as_token: 12u394refgbdhivsia + + # If enabled, don't let users set their own display names/avatars + # other than for the very first time (unless they are a server admin). + # Useful when provisioning users based on the contents of a 3rd party + # directory and to avoid ambiguities. + # + #disable_set_displayname: false + #disable_set_avatar_url: false + # Handle threepid (email/phone etc) registration and password resets through a set of # *trusted* identity servers. Note that this allows the configured identity server to # reset passwords for accounts! @@ -458,6 +561,31 @@ class RegistrationConfig(Config): # Defaults to true. # #auto_join_rooms_for_guests: false + + # Rewrite identity server URLs with a map from one URL to another. Applies to URLs + # provided by clients (which have https:// prepended) and those specified + # in `account_threepid_delegates`. URLs should not feature a trailing slash. + # + #rewrite_identity_server_urls: + # "https://somewhere.example.com": "https://somewhereelse.example.com" + + # When a user registers an account with an email address, it can be useful to + # bind that email address to their mxid on an identity server. Typically, this + # requires the user to validate their email address with the identity server. + # However if Synapse itself is handling email validation on registration, the + # user ends up needing to validate their email twice, which leads to poor UX. 
+ # + # It is possible to force Sydent, one identity server implementation, to bind + # threepids using its internal, unauthenticated bind API: + # https://github.com/matrix-org/sydent/#internal-bind-and-unbind-api + # + # Configure the address of a Sydent server here to have Synapse attempt + # to automatically bind users' emails following registration. The + # internal bind API must be reachable from Synapse, but should NOT be + # exposed to any third party, as it allows the creation of bindings + # without validation. + # + #bind_new_user_emails_to_sydent: https://example.com:8091 """ % locals() ) diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 01009f3924..54f565ad5b 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py
@@ -104,6 +104,12 @@ class ContentRepositoryConfig(Config): self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M")) self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M")) + self.max_avatar_size = config.get("max_avatar_size") + if self.max_avatar_size: + self.max_avatar_size = self.parse_size(self.max_avatar_size) + + self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes", []) + self.media_store_path = self.ensure_directory( config.get("media_store_path", "media_store") ) @@ -244,6 +250,30 @@ class ContentRepositoryConfig(Config): # #max_upload_size: 10M + # The largest allowed size for a user avatar. If not defined, no + # restriction will be imposed. + # + # Note that this only applies when an avatar is changed globally. + # Per-room avatar changes are not affected. See allow_per_room_profiles + # for disabling that functionality. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's local media repo. + # + #max_avatar_size: 10M + + # Allow mimetypes for a user avatar. If not defined, no restriction will + # be imposed. + # + # Note that this only applies when an avatar is changed globally. + # Per-room avatar changes are not affected. See allow_per_room_profiles + # for disabling that functionality. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's local media repo. + # + #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # Maximum number of pixels that will be thumbnailed # #max_image_pixels: 32M diff --git a/synapse/config/server.py b/synapse/config/server.py
index e85c6a0840..eff2a846a4 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py
@@ -19,7 +19,7 @@ import logging import os.path import re from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Optional, Set import attr import yaml @@ -276,6 +276,12 @@ class ServerConfig(Config): # events with profile information that differ from the target's global profile. self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + # Whether to show the users on this homeserver in the user directory. Defaults to + # True. + self.show_users_in_user_directory = config.get( + "show_users_in_user_directory", True + ) + retention_config = config.get("retention") if retention_config is None: retention_config = {} @@ -542,6 +548,19 @@ class ServerConfig(Config): users_new_default_push_rules ) # type: set + # Whitelist of domain names that given next_link parameters must have + next_link_domain_whitelist = config.get( + "next_link_domain_whitelist" + ) # type: Optional[List[str]] + + self.next_link_domain_whitelist = None # type: Optional[Set[str]] + if next_link_domain_whitelist is not None: + if not isinstance(next_link_domain_whitelist, list): + raise ConfigError("'next_link_domain_whitelist' must be a list") + + # Turn the list into a set to improve lookup speed. + self.next_link_domain_whitelist = set(next_link_domain_whitelist) + def has_tls_listener(self) -> bool: return any(listener.tls for listener in self.listeners) @@ -923,6 +942,74 @@ class ServerConfig(Config): # #allow_per_room_profiles: false + # Whether to show the users on this homeserver in the user directory. Defaults to + # 'true'. + # + #show_users_in_user_directory: false + + # Message retention policy at the server level. + # + # Room admins and mods can define a retention period for their rooms using the + # 'm.room.retention' state event, and server admins can cap this period by setting + # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. + # + # If this feature is enabled, Synapse will regularly look for and purge events + # which are older than the room's maximum retention period. Synapse will also + # filter events received over federation so that events that should have been + # purged are ignored and not stored again. + # + retention: + # The message retention policies feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # Default retention policy. If set, Synapse will apply it to rooms that lack the + # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't + # matter much because Synapse doesn't take it into account yet. + # + #default_policy: + # min_lifetime: 1d + # max_lifetime: 1y + + # Retention policy limits. If set, a user won't be able to send a + # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' + # that's not within this range. This is especially useful in closed federations, + # in which server admins can make sure every federating server applies the same + # rules. + # + #allowed_lifetime_min: 1d + #allowed_lifetime_max: 1y + + # Server admins can define the settings of the background jobs purging the + # events which lifetime has expired under the 'purge_jobs' section. + # + # If no configuration is provided, a single job will be set up to delete expired + # events in every room daily. + # + # Each job's configuration defines which range of message lifetimes the job + # takes care of. 
For example, if 'shortest_max_lifetime' is '2d' and + # 'longest_max_lifetime' is '3d', the job will handle purging expired events in + # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and + # lower than or equal to 3 days. Both the minimum and the maximum value of a + # range are optional, e.g. a job with no 'shortest_max_lifetime' and a + # 'longest_max_lifetime' of '3d' will handle every room with a retention policy + # whose 'max_lifetime' is lower than or equal to three days. + # + # The rationale for this per-job configuration is that some rooms might have a + # retention policy with a low 'max_lifetime', where history needs to be purged + # of outdated messages on a very frequent basis (e.g. every 5min), but not want + # that purge to be performed by a job that's iterating over every room it knows, + # which would be quite heavy on the server. + # + #purge_jobs: + # - shortest_max_lifetime: 1d + # longest_max_lifetime: 3d + # interval: 5m + # - shortest_max_lifetime: 3d + # longest_max_lifetime: 1y + # interval: 24h + # How long to keep redacted events in unredacted form in the database. After # this period redacted events get replaced with their redacted form in the DB. # @@ -1014,6 +1101,24 @@ class ServerConfig(Config): # act as if no error happened and return a fake session ID ('sid') to clients. # #request_token_inhibit_3pid_errors: true + + # A list of domains that the domain portion of 'next_link' parameters + # must match. + # + # This parameter is optionally provided by clients while requesting + # validation of an email or phone number, and maps to a link that + # users will be automatically redirected to after validation + # succeeds. Clients can make use of this parameter to aid the validation + # process. + # + # The whitelist is applied whether the homeserver or an + # identity server is handling validation. + # + # The default value is no whitelist functionality; all domains are + # allowed. Setting this value to an empty list will instead disallow + # all domains. + # + #next_link_domain_whitelist: ["matrix.org"] """ % locals() ) diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
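For illustration only (this sketch is not part of the diff): the check described by next_link_domain_whitelist boils down to matching the domain portion of a client-supplied next_link against the configured set, with an absent whitelist meaning "allow all domains" and an empty list meaning "allow none". The helper below is hypothetical, not the code this patch adds.

# Hypothetical helper illustrating the 'next_link_domain_whitelist' behaviour.
from typing import Optional, Set
from urllib.parse import urlparse

def valid_next_link(next_link: str, whitelist: Optional[Set[str]]) -> bool:
    if whitelist is None:
        # No whitelist configured: all domains are allowed.
        return True
    # An empty set disallows every domain; otherwise the hostname must match.
    domain = urlparse(next_link).hostname
    return domain in whitelist

assert valid_next_link("https://matrix.org/verify", {"matrix.org"})
assert not valid_next_link("https://evil.example/verify", {"matrix.org"})
assert valid_next_link("https://anything.example/ok", None)
assert not valid_next_link("https://anything.example/ok", set())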
index c8d19c5d6b..43b6c40456 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py
@@ -26,6 +26,7 @@ class UserDirectoryConfig(Config): def read_config(self, config, **kwargs): self.user_directory_search_enabled = True self.user_directory_search_all_users = False + self.user_directory_defer_to_id_server = None user_directory_config = config.get("user_directory", None) if user_directory_config: self.user_directory_search_enabled = user_directory_config.get( @@ -34,6 +35,9 @@ class UserDirectoryConfig(Config): self.user_directory_search_all_users = user_directory_config.get( "search_all_users", False ) + self.user_directory_defer_to_id_server = user_directory_config.get( + "defer_to_id_server", None + ) def generate_config_section(self, config_dir_path, server_name, **kwargs): return """ @@ -52,4 +56,9 @@ class UserDirectoryConfig(Config): #user_directory: # enabled: true # search_all_users: false + # + # # If this is set, user search will be delegated to this ID server instead + # # of synapse performing the search itself. + # # This is an experimental API. + # defer_to_id_server: https://id.example.com """ diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index b0fc859a47..d141519ec4 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py
@@ -59,42 +59,82 @@ class SpamChecker: return False def user_may_invite( - self, inviter_userid: str, invitee_userid: str, room_id: str + self, + inviter_userid: str, + invitee_userid: str, + third_party_invite: Optional[Dict], + room_id: str, + new_room: bool, + published_room: bool, ) -> bool: """Checks if a given user may send an invite If this method returns false, the invite will be rejected. Args: - inviter_userid: The user ID of the sender of the invitation - invitee_userid: The user ID targeted in the invitation - room_id: The room ID + inviter_userid: + invitee_userid: The user ID of the invitee. Is None + if this is a third party invite and the 3PID is not bound to a + user ID. + third_party_invite: If a third party invite then is a + dict containing the medium and address of the invitee. + room_id: + new_room: Whether the user is being invited to the room as + part of a room creation, if so the invitee would have been + included in the call to `user_may_create_room`. + published_room: Whether the room the user is being invited + to has been published in the local homeserver's public room + directory. Returns: True if the user may send an invite, otherwise False """ for spam_checker in self.spam_checkers: if ( - spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id) + spam_checker.user_may_invite( + inviter_userid, + invitee_userid, + third_party_invite, + room_id, + new_room, + published_room, + ) is False ): return False return True - def user_may_create_room(self, userid: str) -> bool: + def user_may_create_room( + self, + userid: str, + invite_list: List[str], + third_party_invite_list: List[Dict], + cloning: bool, + ) -> bool: """Checks if a given user may create a room If this method returns false, the creation request will be rejected. Args: userid: The ID of the user attempting to create a room + invite_list: List of user IDs that would be invited to + the new room. + third_party_invite_list: List of third party invites + for the new room. + cloning: Whether the user is cloning an existing room, e.g. + upgrading a room. Returns: True if the user may create a room, otherwise False """ for spam_checker in self.spam_checkers: - if spam_checker.user_may_create_room(userid) is False: + if ( + spam_checker.user_may_create_room( + userid, invite_list, third_party_invite_list, cloning + ) + is False + ): return False return True @@ -135,6 +175,25 @@ class SpamChecker: return True + def user_may_join_room(self, userid: str, room_id: str, is_invited: bool): + """Checks if a given users is allowed to join a room. + + Not called when a user creates a room. + + Args: + userid: + room_id: + is_invited: Whether the user is invited into the room + + Returns: + bool: Whether the user may join the room + """ + for spam_checker in self.spam_checkers: + if spam_checker.user_may_join_room(userid, room_id, is_invited) is False: + return False + + return True + def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool: """Checks if a user ID or display name are considered "spammy" by this server. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 9d5310851c..1ca77519d5 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py
@@ -12,10 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Callable from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.types import Requester +from synapse.module_api import ModuleApi +from synapse.types import Requester, StateMap class ThirdPartyEventRules: @@ -38,7 +40,7 @@ class ThirdPartyEventRules: if module is not None: self.third_party_rules = module( - config=config, http_client=hs.get_simple_http_client() + config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), ) async def check_event_allowed( @@ -106,6 +108,48 @@ class ThirdPartyEventRules: if self.third_party_rules is None: return True + state_events = await self._get_state_map_for_room(room_id) + + ret = await self.third_party_rules.check_threepid_can_be_invited( + medium, address, state_events + ) + return ret + + async def check_visibility_can_be_modified( + self, room_id: str, new_visibility: str + ) -> bool: + """Check if a room is allowed to be published to, or removed from, the public room + list. + + Args: + room_id: The ID of the room. + new_visibility: The new visibility state. Either "public" or "private". + + Returns: + True if the room's visibility can be modified, False if not. + """ + if self.third_party_rules is None: + return True + + check_func = getattr( + self.third_party_rules, "check_visibility_can_be_modified", None + ) + if not check_func or not isinstance(check_func, Callable): + return True + + state_events = await self._get_state_map_for_room(room_id) + + return await check_func(room_id, state_events, new_visibility) + + async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: + """Given a room ID, return the state events of that room. + + Args: + room_id: The ID of the room. + + Returns: + A dict mapping (event type, state key) to state event. + """ state_ids = await self.store.get_filtered_current_state_ids(room_id) room_state_events = await self.store.get_events(state_ids.values()) @@ -113,7 +157,4 @@ class ThirdPartyEventRules: for key, event_id in state_ids.items(): state_events[key] = room_state_events[event_id] - ret = await self.third_party_rules.check_threepid_can_be_invited( - medium, address, state_events - ) - return ret + return state_events diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 552519e82c..41a726878d 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py
@@ -209,7 +209,7 @@ class FederationSender: logger.debug("Sending %s to %r", event, destinations) if destinations: - self._send_pdu(event, destinations) + await self._send_pdu(event, destinations) now = self.clock.time_msec() ts = await self.store.get_received_ts(event.event_id) @@ -265,7 +265,7 @@ class FederationSender: finally: self._is_processing = False - def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: + async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: # We loop through all destinations to see whether we already have # a transaction in progress. If we do, stick it in the pending_pdus # table and we'll get back to it later. @@ -280,6 +280,13 @@ class FederationSender: sent_pdus_destination_dist_total.inc(len(destinations)) sent_pdus_destination_dist_count.inc() + # track the fact that we have a PDU for these destinations, + # to allow us to perform catch-up later on if the remote is unreachable + # for a while. + await self.store.store_destination_rooms_entries( + destinations, pdu.room_id, pdu.internal_metadata.stream_ordering, + ) + for destination in destinations: self._get_per_destination_queue(destination).send_pdu(pdu) diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index defc228c23..9f0852b4a2 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py
@@ -325,6 +325,17 @@ class PerDestinationQueue: self._last_device_stream_id = device_stream_id self._last_device_list_stream_id = dev_list_id + + if pending_pdus: + # we sent some PDUs and it was successful, so update our + # last_successful_stream_ordering in the destinations table. + final_pdu = pending_pdus[-1] + last_successful_stream_ordering = ( + final_pdu.internal_metadata.stream_ordering + ) + await self._store.set_destination_last_successful_stream_ordering( + self._destination, last_successful_stream_ordering + ) else: break except NotRetryingDestination as e: diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 17a10f622e..4f7996f947 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py
@@ -16,7 +16,7 @@ import logging import urllib -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from synapse.api.constants import Membership from synapse.api.errors import Codes, HttpResponseException, SynapseError @@ -1004,6 +1004,20 @@ class TransportLayerClient: return self.client.get_json(destination=destination, path=path) + def get_info_of_users(self, destination: str, user_ids: List[str]): + """ + Args: + destination: The remote server + user_ids: A list of user IDs to query info about + + Returns: + Deferred[List]: A dictionary of User ID to information about that user. + """ + path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/users/info") + data = {"user_ids": user_ids} + + return self.client.post_json(destination=destination, path=path, data=data) + def _create_path(federation_prefix, path, *args): """ diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 9325e0f857..794b3098e9 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py
@@ -31,6 +31,7 @@ from synapse.api.urls import ( from synapse.http.endpoint import parse_and_validate_server_name from synapse.http.server import JsonResource from synapse.http.servlet import ( + assert_params_in_dict, parse_boolean_from_args, parse_integer_from_args, parse_json_object_from_request, @@ -845,6 +846,57 @@ class PublicRoomList(BaseFederationServlet): return 200, data +class FederationUserInfoServlet(BaseFederationServlet): + """ + Return information about a set of users. + + This API returns expiration and deactivation information about a set of + users. Requested users not local to this homeserver will be ignored. + + Example request: + POST /users/info + + { + "user_ids": [ + "@alice:example.com", + "@bob:example.com" + ] + } + + Example response + { + "@alice:example.com": { + "expired": false, + "deactivated": true + } + } + """ + + PATH = "/users/info" + PREFIX = FEDERATION_UNSTABLE_PREFIX + + def __init__(self, handler, authenticator, ratelimiter, server_name): + super(FederationUserInfoServlet, self).__init__( + handler, authenticator, ratelimiter, server_name + ) + self.handler = handler + + async def on_POST(self, origin, content, query): + assert_params_in_dict(content, required=["user_ids"]) + + user_ids = content.get("user_ids", []) + + if not isinstance(user_ids, list): + raise SynapseError( + 400, + "'user_ids' must be a list of user ID strings", + errcode=Codes.INVALID_PARAM, + ) + + data = await self.handler.store.get_info_for_users(user_ids) + return 200, data + + class FederationVersionServlet(BaseFederationServlet): PATH = "/version" @@ -1406,6 +1458,7 @@ FEDERATION_SERVLET_CLASSES = ( On3pidBindServlet, FederationVersionServlet, RoomComplexityServlet, + FederationUserInfoServlet, ) # type: Tuple[Type[BaseFederationServlet], ...] OPENID_SERVLET_CLASSES = ( diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
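For illustration only (this sketch is not part of the diff): the users/info federation endpoint above pairs with TransportLayerClient.get_info_of_users, letting one server ask a peer about account expiry and deactivation in bulk. A hedged usage sketch, assuming an already-constructed transport client and the response shape shown in the servlet docstring:

# Usage sketch for the users/info federation API; 'transport' stands for an
# already-built TransportLayerClient, and authentication/retries are handled by
# Synapse's federation client machinery.
async def fetch_remote_account_status(transport, destination: str, user_ids):
    # POSTs {"user_ids": [...]} to the /users/info path under the unstable
    # federation prefix, per get_info_of_users() above.
    info = await transport.get_info_of_users(destination, list(user_ids))
    # The response maps each user ID known to the destination to expiry and
    # deactivation flags; users it does not know about are simply absent.
    return {
        user_id: (data.get("expired", False), data.get("deactivated", False))
        for user_id, data in info.items()
    }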
index 4caf6d591a..5162cc9a58 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -38,6 +38,8 @@ class AccountValidityHandler: self.clock = self.hs.get_clock() self._account_validity = self.hs.config.account_validity + self._show_users_in_user_directory = self.hs.config.show_users_in_user_directory + self.profile_handler = self.hs.get_profile_handler() if ( self._account_validity.enabled @@ -72,6 +74,18 @@ class AccountValidityHandler: self.clock.looping_call(send_emails, 30 * 60 * 1000) + # Mark users as inactive when they expired. Check once every hour + if self._account_validity.enabled: + + def mark_expired_users_as_inactive(): + # run as a background process to allow async functions to work + return run_as_background_process( + "_mark_expired_users_as_inactive", + self._mark_expired_users_as_inactive, + ) + + self.clock.looping_call(mark_expired_users_as_inactive, 60 * 60 * 1000) + async def _send_renewal_emails(self): """Gets the list of users whose account is expiring in the amount of time configured in the ``renew_at`` parameter from the ``account_validity`` @@ -252,4 +266,24 @@ class AccountValidityHandler: user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent ) + # Check if renewed users should be reintroduced to the user directory + if self._show_users_in_user_directory: + # Show the user in the directory again by setting them to active + await self.profile_handler.set_active( + [UserID.from_string(user_id)], True, True + ) + return expiration_ts + + async def _mark_expired_users_as_inactive(self): + """Iterate over active, expired users. Mark them as inactive in order to hide them + from the user directory. + + Returns: + Deferred + """ + # Get active, expired users + active_expired_users = await self.store.get_expired_users() + + # Mark each as non-active + await self.profile_handler.set_active(active_expired_users, False, True) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
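The hourly expiry sweep above follows the usual looping-call pattern: the clock schedules a plain callable which wraps the coroutine in a background process. A compact sketch of that wiring, using the names from the hunk; the standalone function is illustrative only.

from synapse.metrics.background_process_metrics import run_as_background_process

def schedule_expiry_sweep(hs, account_validity_handler) -> None:
    # looping_call wants a synchronous callable, so the coroutine is wrapped
    # in run_as_background_process instead of being awaited here.
    def mark_expired_users_as_inactive():
        return run_as_background_process(
            "_mark_expired_users_as_inactive",
            account_validity_handler._mark_expired_users_as_inactive,
        )

    # Interval is in milliseconds: run once an hour.
    hs.get_clock().looping_call(mark_expired_users_as_inactive, 60 * 60 * 1000)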
index 25169157c1..0e26a32750 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -35,6 +35,7 @@ class DeactivateAccountHandler(BaseHandler): self._device_handler = hs.get_device_handler() self._room_member_handler = hs.get_room_member_handler() self._identity_handler = hs.get_handlers().identity_handler + self._profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() # Flag that indicates whether the process to part users from rooms is running @@ -108,6 +109,9 @@ class DeactivateAccountHandler(BaseHandler): await self.store.user_set_password_hash(user_id, None) + user = UserID.from_string(user_id) + await self._profile_handler.set_active([user], False, False) + # Add the user to a table of users pending deactivation (ie. # removal from all the rooms they're a member of) await self.store.add_user_pending_deactivation(user_id) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 46826eb784..bc61bb6acb 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -46,6 +46,7 @@ class DirectoryHandler(BaseHandler): self.config = hs.config self.enable_room_list_search = hs.config.enable_room_list_search self.require_membership = hs.config.require_membership_for_aliases + self.third_party_event_rules = hs.get_third_party_event_rules() self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -454,6 +455,15 @@ class DirectoryHandler(BaseHandler): # per alias creation rule? raise SynapseError(403, "Not allowed to publish room") + # Check if publishing is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Not allowed to publish room") + await self.store.set_room_is_public(room_id, making_public) async def edit_published_appservice_room_list( diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
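The publication check above hands the decision to a third-party rules module. A hedged sketch of such a module follows; only the check_visibility_can_be_modified name and arguments come from this diff, the blocking policy and config key are invented.

class DirectoryPolicyModule:
    def __init__(self, config, http_client=None):
        # Rooms that must never appear in the public directory
        # (illustrative config key; not defined anywhere in this diff).
        self._never_public = set(config.get("never_public_rooms", []))

    async def check_visibility_can_be_modified(
        self, room_id: str, new_visibility: str
    ) -> bool:
        # Returning False makes DirectoryHandler raise 403 "Not allowed to
        # publish room" before the directory entry is changed.
        if new_visibility == "public" and room_id in self._never_public:
            return False
        return True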
index b05e32f457..fdce54c5c3 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -39,10 +39,6 @@ class EventStreamHandler(BaseHandler): def __init__(self, hs: "HomeServer"): super(EventStreamHandler, self).__init__(hs) - self.distributor = hs.get_distributor() - self.distributor.declare("started_user_eventstream") - self.distributor.declare("stopped_user_eventstream") - self.clock = hs.get_clock() self.notifier = hs.get_notifier() diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 43f2986f89..8008fb8a51 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -69,7 +69,6 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ReplicationStoreRoomOnInviteRestServlet, ) -from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( @@ -80,7 +79,6 @@ from synapse.types import ( get_domain_from_id, ) from synapse.util.async_helpers import Linearizer, concurrently_execute -from synapse.util.distributor import user_joined_room from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server @@ -141,9 +139,6 @@ class FederationHandler(BaseHandler): self._replication = hs.get_replication_data_handler() self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs) - self._notify_user_membership_change = ReplicationUserJoinedLeftRoomRestServlet.make_client( - hs - ) self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client( hs ) @@ -182,7 +177,7 @@ class FederationHandler(BaseHandler): room_id = pdu.room_id event_id = pdu.event_id - logger.info("handling received PDU: %s", pdu) + logger.info("[%s %s] handling received PDU: %s", room_id, event_id, pdu) # We reprocess pdus when we have seen them only as outliers existing = await self.store.get_event( @@ -297,6 +292,14 @@ class FederationHandler(BaseHandler): room_id, event_id, ) + elif missing_prevs: + logger.info( + "[%s %s] Not recursively fetching %d missing prev_events: %s", + room_id, + event_id, + len(missing_prevs), + shortstr(missing_prevs), + ) if prevs - seen: # We've still not been able to get all of the prev_events for this event. @@ -341,12 +344,6 @@ class FederationHandler(BaseHandler): affected=pdu.event_id, ) - logger.info( - "Event %s is missing prev_events: calculating state for a " - "backwards extremity", - event_id, - ) - # Calculate the state after each of the previous events, and # resolve them to find the correct state at the current event. event_map = {event_id: pdu} @@ -364,7 +361,10 @@ class FederationHandler(BaseHandler): # know about for p in prevs - seen: logger.info( - "Requesting state at missing prev_event %s", event_id, + "[%s %s] Requesting state at missing prev_event %s", + room_id, + event_id, + p, ) with nested_logging_context(p): @@ -399,9 +399,7 @@ class FederationHandler(BaseHandler): # First though we need to fetch all the events that are in # state_map, so we can build up the state below. evs = await self.store.get_events( - list(state_map.values()), - get_prev_content=False, - redact_behaviour=EventRedactBehaviour.AS_IS, + list(state_map.values()), get_prev_content=False, ) event_map.update(evs) @@ -704,31 +702,10 @@ class FederationHandler(BaseHandler): logger.debug("[%s %s] Processing event: %s", room_id, event_id, event) try: - context = await self._handle_new_event(origin, event, state=state) + await self._handle_new_event(origin, event, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) - if event.type == EventTypes.Member: - if event.membership == Membership.JOIN: - # Only fire user_joined_room if the user has acutally - # joined the room. Don't bother if the user is just - # changing their profile info. 
- newly_joined = True - - prev_state_ids = await context.get_prev_state_ids() - - prev_state_id = prev_state_ids.get((event.type, event.state_key)) - if prev_state_id: - prev_state = await self.store.get_event( - prev_state_id, allow_none=True - ) - if prev_state and prev_state.membership == Membership.JOIN: - newly_joined = False - - if newly_joined: - user = UserID.from_string(event.state_key) - await self.user_joined_room(user, room_id) - # For encrypted messages we check that we know about the sending device, # if we don't then we mark the device cache for that user as stale. if event.type == EventTypes.Encrypted: @@ -1550,11 +1527,6 @@ class FederationHandler(BaseHandler): event.signatures, ) - if event.type == EventTypes.Member: - if event.content["membership"] == Membership.JOIN: - user = UserID.from_string(event.state_key) - await self.user_joined_room(user, event.room_id) - prev_state_ids = await context.get_prev_state_ids() state_ids = list(prev_state_ids.values()) @@ -1581,8 +1553,15 @@ class FederationHandler(BaseHandler): if self.hs.config.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") + is_published = await self.store.is_room_published(event.room_id) + if not self.spam_checker.user_may_invite( - event.sender, event.state_key, event.room_id + event.sender, + event.state_key, + None, + room_id=event.room_id, + new_room=False, + published_room=is_published, ): raise SynapseError( 403, "This user is not permitted to send invites to this server/user" @@ -2970,7 +2949,7 @@ class FederationHandler(BaseHandler): event, event_stream_id, max_stream_id, extra_users=extra_users ) - await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(max_stream_id) async def _clean_room_for_join(self, room_id: str) -> None: """Called to clean up any data in DB for a given room, ready for the @@ -2984,16 +2963,6 @@ class FederationHandler(BaseHandler): else: await self.store.clean_room_for_join(room_id) - async def user_joined_room(self, user: UserID, room_id: str) -> None: - """Called when a new user has joined the room - """ - if self.config.worker_app: - await self._notify_user_membership_change( - room_id=room_id, user_id=user.to_string(), change="joined" - ) - else: - user_joined_room(self.distributor, user, room_id) - async def get_room_complexity( self, remote_room_hosts: List[str], room_id: str ) -> Optional[dict]: diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 0ce6ddfbe4..b5676b248b 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018, 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,9 +24,11 @@ from typing import Awaitable, Callable, Dict, List, Optional, Tuple from twisted.internet.error import TimeoutError from synapse.api.errors import ( + AuthError, CodeMessageException, Codes, HttpResponseException, + ProxiedRequestError, SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour @@ -40,31 +42,36 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) -id_server_scheme = "https://" - class IdentityHandler(BaseHandler): def __init__(self, hs): super(IdentityHandler, self).__init__(hs) - self.http_client = SimpleHttpClient(hs) + self.hs = hs + self.http_client = hs.get_simple_http_client() # We create a blacklisting instance of SimpleHttpClient for contacting identity # servers specified by clients self.blacklisting_http_client = SimpleHttpClient( hs, ip_blacklist=hs.config.federation_ip_range_blacklist ) self.federation_http_client = hs.get_http_client() - self.hs = hs + + self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers) + self.trust_any_id_server_just_for_testing_do_not_use = ( + hs.config.use_insecure_ssl_client_just_for_testing_do_not_use + ) + self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls + self._enable_lookup = hs.config.enable_3pid_lookup async def threepid_from_creds( - self, id_server: str, creds: Dict[str, str] + self, id_server_url: str, creds: Dict[str, str] ) -> Optional[JsonDict]: """ Retrieve and validate a threepid identifier from a "credentials" dictionary against a given identity server Args: - id_server: The identity server to validate 3PIDs against. Must be a + id_server_url: The identity server to validate 3PIDs against. Must be a complete URL including the protocol (http(s)://) creds: Dictionary containing the following keys: * client_secret|clientSecret: A unique secret str provided by the client @@ -89,7 +96,14 @@ class IdentityHandler(BaseHandler): query_params = {"sid": session_id, "client_secret": client_secret} - url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid" + # if we have a rewrite rule set for the identity server, + # apply it now. + id_server_url = self.rewrite_id_server_url(id_server_url) + + url = "%s%s" % ( + id_server_url, + "/_matrix/identity/api/v1/3pid/getValidated3pid", + ) try: data = await self.http_client.get_json(url, query_params) @@ -98,7 +112,7 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info( "%s returned %i for threepid validation for: %s", - id_server, + id_server_url, e.code, creds, ) @@ -112,7 +126,7 @@ class IdentityHandler(BaseHandler): if "medium" in data: return data - logger.info("%s reported non-validated threepid: %s", id_server, creds) + logger.info("%s reported non-validated threepid: %s", id_server_url, creds) return None async def bind_threepid( @@ -144,14 +158,19 @@ class IdentityHandler(BaseHandler): if id_access_token is None: use_v2 = False + # if we have a rewrite rule set for the identity server, + # apply it now, but only for sending the request (not + # storing in the database). 
+ id_server_url = self.rewrite_id_server_url(id_server, add_https=True) + # Decide which API endpoint URLs to use headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} if use_v2: - bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) + bind_url = "%s/_matrix/identity/v2/3pid/bind" % (id_server_url,) headers["Authorization"] = create_id_access_token_header(id_access_token) # type: ignore else: - bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) + bind_url = "%s/_matrix/identity/api/v1/3pid/bind" % (id_server_url,) try: # Use the blacklisting http client as this call is only to identity servers @@ -238,9 +257,6 @@ class IdentityHandler(BaseHandler): True on success, otherwise False if the identity server doesn't support unbinding """ - url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) - url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii") - content = { "mxid": mxid, "threepid": {"medium": threepid["medium"], "address": threepid["address"]}, @@ -249,6 +265,7 @@ class IdentityHandler(BaseHandler): # we abuse the federation http client to sign the request, but we have to send it # using the normal http client since we don't want the SRV lookup and want normal # 'browser-like' HTTPS. + url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii") auth_headers = self.federation_http_client.build_auth_headers( destination=None, method=b"POST", @@ -258,6 +275,15 @@ class IdentityHandler(BaseHandler): ) headers = {b"Authorization": auth_headers} + # if we have a rewrite rule set for the identity server, + # apply it now. + # + # Note that destination_is has to be the real id_server, not + # the server we connect to. + id_server_url = self.rewrite_id_server_url(id_server, add_https=True) + + url = "%s/_matrix/identity/api/v1/3pid/unbind" % (id_server_url,) + try: # Use the blacklisting http client as this call is only to identity servers # provided by a client @@ -371,9 +397,28 @@ class IdentityHandler(BaseHandler): return session_id + def rewrite_id_server_url(self, url: str, add_https=False) -> str: + """Given an identity server URL, optionally add a protocol scheme + before rewriting it according to the rewrite_identity_server_urls + config option + + Adds https:// to the URL if specified, then tries to rewrite the + url. Returns either the rewritten URL or the URL with optional + protocol scheme additions. + """ + rewritten_url = url + if add_https: + rewritten_url = "https://" + rewritten_url + + rewritten_url = self.rewrite_identity_server_urls.get( + rewritten_url, rewritten_url + ) + logger.debug("Rewriting identity server rule from %s to %s", url, rewritten_url) + return rewritten_url + async def requestEmailToken( self, - id_server: str, + id_server_url: str, email: str, client_secret: str, send_attempt: int, @@ -384,7 +429,7 @@ class IdentityHandler(BaseHandler): validation. Args: - id_server: The identity server to proxy to + id_server_url: The identity server to proxy to email: The email to send the message to client_secret: The unique client_secret sends by the user send_attempt: Which attempt this is @@ -398,6 +443,11 @@ class IdentityHandler(BaseHandler): "client_secret": client_secret, "send_attempt": send_attempt, } + + # if we have a rewrite rule set for the identity server, + # apply it now. 
+ id_server_url = self.rewrite_id_server_url(id_server_url) + if next_link: params["next_link"] = next_link @@ -412,7 +462,8 @@ class IdentityHandler(BaseHandler): try: data = await self.http_client.post_json_get_json( - id_server + "/_matrix/identity/api/v1/validate/email/requestToken", + "%s/_matrix/identity/api/v1/validate/email/requestToken" + % (id_server_url,), params, ) return data @@ -424,7 +475,7 @@ class IdentityHandler(BaseHandler): async def requestMsisdnToken( self, - id_server: str, + id_server_url: str, country: str, phone_number: str, client_secret: str, @@ -435,7 +486,7 @@ class IdentityHandler(BaseHandler): Request an external server send an SMS message on our behalf for the purposes of threepid validation. Args: - id_server: The identity server to proxy to + id_server_url: The identity server to proxy to country: The country code of the phone number phone_number: The number to send the message to client_secret: The unique client_secret sends by the user @@ -463,9 +514,13 @@ class IdentityHandler(BaseHandler): "details and update your config file." ) + # if we have a rewrite rule set for the identity server, + # apply it now. + id_server_url = self.rewrite_id_server_url(id_server_url) try: data = await self.http_client.post_json_get_json( - id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken", + "%s/_matrix/identity/api/v1/validate/msisdn/requestToken" + % (id_server_url,), params, ) except HttpResponseException as e: @@ -559,6 +614,86 @@ class IdentityHandler(BaseHandler): logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) raise SynapseError(400, "Error contacting the identity server") + # TODO: The following two methods are used for proxying IS requests using + # the CS API. They should be consolidated with those in RoomMemberHandler + # https://github.com/matrix-org/synapse-dinsic/issues/25 + + async def proxy_lookup_3pid( + self, id_server: str, medium: str, address: str + ) -> JsonDict: + """Looks up a 3pid in the passed identity server. + + Args: + id_server: The server name (including port, if required) + of the identity server to use. + medium: The type of the third party identifier (e.g. "email"). + address: The third party identifier (e.g. "foo@example.com"). + + Returns: + The result of the lookup. See + https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup + for details + """ + if not self._enable_lookup: + raise AuthError( + 403, "Looking up third-party identifiers is denied from this server" + ) + + id_server_url = self.rewrite_id_server_url(id_server, add_https=True) + + try: + data = await self.http_client.get_json( + "%s/_matrix/identity/api/v1/lookup" % (id_server_url,), + {"medium": medium, "address": address}, + ) + + except HttpResponseException as e: + logger.info("Proxied lookup failed: %r", e) + raise e.to_synapse_error() + except IOError as e: + logger.info("Failed to contact %s: %s", id_server, e) + raise ProxiedRequestError(503, "Failed to contact identity server") + + return data + + async def proxy_bulk_lookup_3pid( + self, id_server: str, threepids: List[List[str]] + ) -> JsonDict: + """Looks up given 3pids in the passed identity server. + + Args: + id_server: The server name (including port, if required) + of the identity server to use. + threepids: The third party identifiers to lookup, as + a list of 2-string sized lists ([medium, address]). + + Returns: + The result of the lookup. 
See + https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup + for details + """ + if not self._enable_lookup: + raise AuthError( + 403, "Looking up third-party identifiers is denied from this server" + ) + + id_server_url = self.rewrite_id_server_url(id_server, add_https=True) + + try: + data = await self.http_client.post_json_get_json( + "%s/_matrix/identity/api/v1/bulk_lookup" % (id_server_url,), + {"threepids": threepids}, + ) + + except HttpResponseException as e: + logger.info("Proxied lookup failed: %r", e) + raise e.to_synapse_error() + except IOError as e: + logger.info("Failed to contact %s: %s", id_server, e) + raise ProxiedRequestError(503, "Failed to contact identity server") + + return data + async def lookup_3pid( self, id_server: str, @@ -579,10 +714,13 @@ class IdentityHandler(BaseHandler): Returns: the matrix ID of the 3pid, or None if it is not recognized. """ + # Rewrite id_server URL if necessary + id_server_url = self.rewrite_id_server_url(id_server, add_https=True) + if id_access_token is not None: try: results = await self._lookup_3pid_v2( - id_server, id_access_token, medium, address + id_server_url, id_access_token, medium, address ) return results @@ -600,16 +738,17 @@ class IdentityHandler(BaseHandler): logger.warning("Error when looking up hashing details: %s", e) return None - return await self._lookup_3pid_v1(id_server, medium, address) + return await self._lookup_3pid_v1(id_server, id_server_url, medium, address) async def _lookup_3pid_v1( - self, id_server: str, medium: str, address: str + self, id_server: str, id_server_url: str, medium: str, address: str ) -> Optional[str]: """Looks up a 3pid in the passed identity server using v1 lookup. Args: id_server: The server name (including port, if required) of the identity server to use. + id_server_url: The actual, reachable domain of the id server medium: The type of the third party identifier (e.g. "email"). address: The third party identifier (e.g. "foo@example.com"). @@ -617,8 +756,8 @@ class IdentityHandler(BaseHandler): the matrix ID of the 3pid, or None if it is not recognized. """ try: - data = await self.blacklisting_http_client.get_json( - "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), + data = await self.http_client.get_json( + "%s/_matrix/identity/api/v1/lookup" % (id_server_url,), {"medium": medium, "address": address}, ) @@ -635,13 +774,12 @@ class IdentityHandler(BaseHandler): return None async def _lookup_3pid_v2( - self, id_server: str, id_access_token: str, medium: str, address: str + self, id_server_url: str, id_access_token: str, medium: str, address: str ) -> Optional[str]: """Looks up a 3pid in the passed identity server using v2 lookup. Args: - id_server: The server name (including port, if required) - of the identity server to use. + id_server_url: The protocol scheme and domain of the id server id_access_token: The access token to authenticate to the identity server with medium: The type of the third party identifier (e.g. "email"). address: The third party identifier (e.g. "foo@example.com"). 
@@ -651,8 +789,8 @@ class IdentityHandler(BaseHandler): """ # Check what hashing details are supported by this identity server try: - hash_details = await self.blacklisting_http_client.get_json( - "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), + hash_details = await self.http_client.get_json( + "%s/_matrix/identity/v2/hash_details" % (id_server_url,), {"access_token": id_access_token}, ) except TimeoutError: @@ -660,15 +798,14 @@ class IdentityHandler(BaseHandler): if not isinstance(hash_details, dict): logger.warning( - "Got non-dict object when checking hash details of %s%s: %s", - id_server_scheme, - id_server, + "Got non-dict object when checking hash details of %s: %s", + id_server_url, hash_details, ) raise SynapseError( 400, - "Non-dict object from %s%s during v2 hash_details request: %s" - % (id_server_scheme, id_server, hash_details), + "Non-dict object from %s during v2 hash_details request: %s" + % (id_server_url, hash_details), ) # Extract information from hash_details @@ -682,8 +819,8 @@ class IdentityHandler(BaseHandler): ): raise SynapseError( 400, - "Invalid hash details received from identity server %s%s: %s" - % (id_server_scheme, id_server, hash_details), + "Invalid hash details received from identity server %s: %s" + % (id_server_url, hash_details), ) # Check if any of the supported lookup algorithms are present @@ -705,7 +842,7 @@ class IdentityHandler(BaseHandler): else: logger.warning( "None of the provided lookup algorithms of %s are supported: %s", - id_server, + id_server_url, supported_lookup_algorithms, ) raise SynapseError( @@ -718,8 +855,8 @@ class IdentityHandler(BaseHandler): headers = {"Authorization": create_id_access_token_header(id_access_token)} try: - lookup_results = await self.blacklisting_http_client.post_json_get_json( - "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), + lookup_results = await self.http_client.post_json_get_json( + "%s/_matrix/identity/v2/lookup" % (id_server_url,), { "addresses": [lookup_value], "algorithm": lookup_algorithm, @@ -804,15 +941,17 @@ class IdentityHandler(BaseHandler): "sender_avatar_url": inviter_avatar_url, } + # Rewrite the identity server URL if necessary + id_server_url = self.rewrite_id_server_url(id_server, add_https=True) + # Add the identity service access token to the JSON body and use the v2 # Identity Service endpoints if id_access_token is present data = None - base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server) + base_url = "%s/_matrix/identity" % (id_server_url,) if id_access_token: - key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % ( - id_server_scheme, - id_server, + key_validity_url = "%s/_matrix/identity/v2/pubkey/isvalid" % ( + id_server_url, ) # Attempt a v2 lookup @@ -831,9 +970,8 @@ class IdentityHandler(BaseHandler): raise e if data is None: - key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % ( - id_server_scheme, - id_server, + key_validity_url = "%s/_matrix/identity/api/v1/pubkey/isvalid" % ( + id_server_url, ) url = base_url + "/api/v1/store-invite" @@ -845,10 +983,7 @@ class IdentityHandler(BaseHandler): raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning( - "Error trying to call /store-invite on %s%s: %s", - id_server_scheme, - id_server, - e, + "Error trying to call /store-invite on %s: %s", id_server_url, e, ) if data is None: @@ -861,10 +996,9 @@ class IdentityHandler(BaseHandler): ) except HttpResponseException as e: logger.warning( - 
"Error calling /store-invite on %s%s with fallback " + "Error calling /store-invite on %s with fallback " "encoding: %s", - id_server_scheme, - id_server, + id_server_url, e, ) raise e @@ -885,6 +1019,39 @@ class IdentityHandler(BaseHandler): display_name = data["display_name"] return token, public_keys, fallback_public_key, display_name + async def bind_email_using_internal_sydent_api( + self, id_server_url: str, email: str, user_id: str, + ): + """Bind an email to a fully qualified user ID using the internal API of an + instance of Sydent. + + Args: + id_server_url: The URL of the Sydent instance + email: The email address to bind + user_id: The user ID to bind the email to + + Raises: + HTTPResponseException: On a non-2xx HTTP response. + """ + # Extract the domain name from the IS URL as we store IS domains instead of URLs + id_server = urllib.parse.urlparse(id_server_url).hostname + + # id_server_url is assumed to have no trailing slashes + url = id_server_url + "/_matrix/identity/internal/bind" + body = { + "address": email, + "medium": "email", + "mxid": user_id, + } + + # Bind the threepid + await self.http_client.post_json_get_json(url, body) + + # Remember where we bound the threepid + await self.store.add_user_bound_threepid( + user_id=user_id, medium="email", address=email, id_server=id_server, + ) + def create_id_access_token_header(id_access_token: str) -> List[str]: """Create an Authorization header for passing to SimpleHttpClient as the header value diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index d5ddc583ad..ddb8f0712b 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -116,14 +116,13 @@ class InitialSyncHandler(BaseHandler): now_token = self.hs.get_event_sources().get_current_token() presence_stream = self.hs.get_event_sources().sources["presence"] - pagination_config = PaginationConfig(from_token=now_token) - presence, _ = await presence_stream.get_pagination_rows( - user, pagination_config.get_source_config("presence"), None + presence, _ = await presence_stream.get_new_events( + user, from_key=None, include_offline=False ) - receipt_stream = self.hs.get_event_sources().sources["receipt"] - receipt, _ = await receipt_stream.get_pagination_rows( - user, pagination_config.get_source_config("receipt"), None + joined_rooms = [r.room_id for r in room_list if r.membership == Membership.JOIN] + receipt = await self.store.get_linearized_receipts_for_rooms( + joined_rooms, to_key=int(now_token.receipt_key), ) tags_by_room = await self.store.get_tags_for_user(user_id) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 8a7b4916cd..4d011dfe9a 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -59,6 +59,7 @@ from synapse.visibility import filter_events_for_client from ._base import BaseHandler if TYPE_CHECKING: + from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -396,7 +397,9 @@ class EventCreationHandler: self.action_generator = hs.get_action_generator() self.spam_checker = hs.get_spam_checker() - self.third_party_event_rules = hs.get_third_party_event_rules() + self.third_party_event_rules = ( + self.hs.get_third_party_event_rules() + ) # type: ThirdPartyEventRules self._block_events_without_consent_error = ( self.config.block_events_without_consent_error @@ -1145,7 +1148,7 @@ class EventCreationHandler: # If there's an expiry timestamp on the event, schedule its expiry. self._message_handler.maybe_schedule_expiry(event) - await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id) + await self.pusher_pool.on_new_notifications(max_stream_id) def _notify(): try: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 34ed0e2921..ec17d3d888 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -335,20 +335,16 @@ class PaginationHandler: user_id = requester.user.to_string() if pagin_config.from_token: - room_token = pagin_config.from_token.room_key + from_token = pagin_config.from_token else: - pagin_config.from_token = ( - self.hs.get_event_sources().get_current_token_for_pagination() - ) - room_token = pagin_config.from_token.room_key - - room_token = RoomStreamToken.parse(room_token) + from_token = self.hs.get_event_sources().get_current_token_for_pagination() - pagin_config.from_token = pagin_config.from_token.copy_and_replace( - "room_key", str(room_token) - ) + if pagin_config.limit is None: + # This shouldn't happen as we've set a default limit before this + # gets called. + raise Exception("limit not set") - source_config = pagin_config.get_source_config("room") + room_token = RoomStreamToken.parse(from_token.room_key) with await self.pagination_lock.read(room_id): ( @@ -358,7 +354,7 @@ class PaginationHandler: room_id, user_id, allow_departed_users=True ) - if source_config.direction == "b": + if pagin_config.direction == "b": # if we're going backwards, we might need to backfill. This # requires that we have a topo token. if room_token.topological: @@ -377,26 +373,35 @@ class PaginationHandler: # case "JOIN" would have been returned. assert member_event_id - leave_token = await self.store.get_topological_token_for_event( + leave_token_str = await self.store.get_topological_token_for_event( member_event_id ) - if RoomStreamToken.parse(leave_token).topological < max_topo: - source_config.from_key = str(leave_token) + leave_token = RoomStreamToken.parse(leave_token_str) + assert leave_token.topological is not None + + if leave_token.topological < max_topo: + from_token = from_token.copy_and_replace( + "room_key", leave_token_str + ) await self.hs.get_handlers().federation_handler.maybe_backfill( room_id, max_topo ) + to_room_key = None + if pagin_config.to_token: + to_room_key = pagin_config.to_token.room_key + events, next_key = await self.store.paginate_room_events( room_id=room_id, - from_key=source_config.from_key, - to_key=source_config.to_key, - direction=source_config.direction, - limit=source_config.limit, + from_key=from_token.room_key, + to_key=to_room_key, + direction=pagin_config.direction, + limit=pagin_config.limit, event_filter=event_filter, ) - next_token = pagin_config.from_token.copy_and_replace("room_key", next_key) + next_token = from_token.copy_and_replace("room_key", next_key) if events: if event_filter: @@ -409,7 +414,7 @@ class PaginationHandler: if not events: return { "chunk": [], - "start": pagin_config.from_token.to_string(), + "start": from_token.to_string(), "end": next_token.to_string(), } @@ -438,7 +443,7 @@ class PaginationHandler: events, time_now, as_client_event=as_client_event ) ), - "start": pagin_config.from_token.to_string(), + "start": from_token.to_string(), "end": next_token.to_string(), } diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 91a3aec1cc..1000ac95ff 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -1108,9 +1108,6 @@ class PresenceEventSource: def get_current_key(self): return self.store.get_current_presence_token() - async def get_pagination_rows(self, user, pagination_config, key): - return await self.get_new_events(user, from_key=None, include_offline=False) - @cached(num_args=2, cache_context=True) async def _get_interested_in(self, user, explicit_room_id, cache_context): """Returns the set of users that the given user should see presence diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 0cb8fad89a..5c9579394c 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +16,11 @@ import logging import random +from typing import List + +from signedjson.sign import sign_json + +from twisted.internet import reactor from synapse.api.errors import ( AuthError, @@ -24,6 +30,7 @@ from synapse.api.errors import ( StoreError, SynapseError, ) +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID, create_requester, get_domain_from_id @@ -43,6 +50,8 @@ class BaseProfileHandler(BaseHandler): subclass MasterProfileHandler """ + PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000 + def __init__(self, hs): super(BaseProfileHandler, self).__init__(hs) @@ -53,6 +62,98 @@ class BaseProfileHandler(BaseHandler): self.user_directory_handler = hs.get_user_directory_handler() + self.http_client = hs.get_simple_http_client() + + self.max_avatar_size = hs.config.max_avatar_size + self.allowed_avatar_mimetypes = hs.config.allowed_avatar_mimetypes + self.replicate_user_profiles_to = hs.config.replicate_user_profiles_to + + if hs.config.worker_app is None: + self.clock.looping_call( + self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS + ) + + if len(self.hs.config.replicate_user_profiles_to) > 0: + reactor.callWhenRunning(self._do_assign_profile_replication_batches) + reactor.callWhenRunning(self._start_replicate_profiles) + # Add a looping call to replicate_profiles: this handles retries + # if the replication is unsuccessful when the user updated their + # profile. + self.clock.looping_call( + self._start_replicate_profiles, self.PROFILE_REPLICATE_INTERVAL + ) + + def _do_assign_profile_replication_batches(self): + return run_as_background_process( + "_assign_profile_replication_batches", + self._assign_profile_replication_batches, + ) + + def _start_replicate_profiles(self): + return run_as_background_process( + "_replicate_profiles", self._replicate_profiles + ) + + async def _assign_profile_replication_batches(self): + """If no profile replication has been done yet, allocate replication batch + numbers to each profile to start the replication process. + """ + logger.info("Assigning profile batch numbers...") + total = 0 + while True: + assigned = await self.store.assign_profile_batch() + total += assigned + if assigned == 0: + break + logger.info("Assigned %d profile batch numbers", total) + + async def _replicate_profiles(self): + """If any profile data has been updated and not pushed to the replication targets, + replicate it. 
+ """ + host_batches = await self.store.get_replication_hosts() + latest_batch = await self.store.get_latest_profile_replication_batch_number() + if latest_batch is None: + latest_batch = -1 + for repl_host in self.hs.config.replicate_user_profiles_to: + if repl_host not in host_batches: + host_batches[repl_host] = -1 + try: + for i in range(host_batches[repl_host] + 1, latest_batch + 1): + await self._replicate_host_profile_batch(repl_host, i) + except Exception: + logger.exception( + "Exception while replicating to %s: aborting for now", repl_host + ) + + async def _replicate_host_profile_batch(self, host, batchnum): + logger.info("Replicating profile batch %d to %s", batchnum, host) + batch_rows = await self.store.get_profile_batch(batchnum) + batch = { + UserID(r["user_id"], self.hs.hostname).to_string(): ( + {"display_name": r["displayname"], "avatar_url": r["avatar_url"]} + if r["active"] + else None + ) + for r in batch_rows + } + + url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,) + body = {"batchnum": batchnum, "batch": batch, "origin_server": self.hs.hostname} + signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0]) + try: + await self.http_client.post_json_get_json(url, signed_body) + await self.store.update_replication_batch_for_host(host, batchnum) + logger.info( + "Successfully replicated profile batch %d to %s", batchnum, host + ) + except Exception: + # This will get retried when the looping call next comes around + logger.exception( + "Failed to replicate profile batch %d to %s", batchnum, host + ) + raise + async def get_profile(self, user_id): target_user = UserID.from_string(user_id) @@ -149,7 +250,7 @@ class BaseProfileHandler(BaseHandler): if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") - if not by_admin and target_user != requester.user: + if not by_admin and requester and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") if not by_admin and not self.hs.config.enable_set_displayname: @@ -172,13 +273,23 @@ class BaseProfileHandler(BaseHandler): if new_displayname == "": new_displayname = None + if len(self.hs.config.replicate_user_profiles_to) > 0: + cur_batchnum = ( + await self.store.get_latest_profile_replication_batch_number() + ) + new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1 + else: + new_batchnum = None + # If the admin changes the display name of a user, the requesting user cannot send # the join event to update the displayname in the rooms. # This must be done by the target user himself. if by_admin: requester = create_requester(target_user) - await self.store.set_profile_displayname(target_user.localpart, new_displayname) + await self.store.set_profile_displayname( + target_user.localpart, new_displayname, new_batchnum + ) if self.hs.config.user_directory_search_all_users: profile = await self.store.get_profileinfo(target_user.localpart) @@ -188,6 +299,46 @@ class BaseProfileHandler(BaseHandler): await self._update_join_states(requester, target_user) + # start a profile replication push + run_in_background(self._replicate_profiles) + + async def set_active( + self, users: List[UserID], active: bool, hide: bool, + ): + """ + Sets the 'active' flag on a set of user profiles. If set to false, the + accounts are considered deactivated or hidden. + + If 'hide' is true, then we interpret active=False as a request to try to + hide the users rather than deactivating them. 
This means withholding the + profiles from replication (and mark it as inactive) rather than clearing + the profile from the HS DB. + + Note that unlike set_displayname and set_avatar_url, this does *not* + perform authorization checks! This is because the only place it's used + currently is in account deactivation where we've already done these + checks anyway. + + Args: + users: The users to modify + active: Whether to set the user to active or inactive + hide: Whether to hide the user (withold from replication). If + False and active is False, user will have their profile + erased + """ + if len(self.replicate_user_profiles_to) > 0: + cur_batchnum = ( + await self.store.get_latest_profile_replication_batch_number() + ) + new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1 + else: + new_batchnum = None + + await self.store.set_profiles_active(users, active, hide, new_batchnum) + + # start a profile replication push + run_in_background(self._replicate_profiles) + async def get_avatar_url(self, target_user): if self.hs.is_mine(target_user): try: @@ -246,11 +397,51 @@ class BaseProfileHandler(BaseHandler): 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) ) + # Enforce a max avatar size if one is defined + if self.max_avatar_size or self.allowed_avatar_mimetypes: + media_id = self._validate_and_parse_media_id_from_avatar_url(new_avatar_url) + + # Check that this media exists locally + media_info = await self.store.get_local_media(media_id) + if not media_info: + raise SynapseError( + 400, "Unknown media id supplied", errcode=Codes.NOT_FOUND + ) + + # Ensure avatar does not exceed max allowed avatar size + media_size = media_info["media_length"] + if self.max_avatar_size and media_size > self.max_avatar_size: + raise SynapseError( + 400, + "Avatars must be less than %s bytes in size" + % (self.max_avatar_size,), + errcode=Codes.TOO_LARGE, + ) + + # Ensure the avatar's file type is allowed + if ( + self.allowed_avatar_mimetypes + and media_info["media_type"] not in self.allowed_avatar_mimetypes + ): + raise SynapseError( + 400, "Avatar file type '%s' not allowed" % media_info["media_type"] + ) + # Same like set_displayname if by_admin: requester = create_requester(target_user) - await self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url) + if len(self.hs.config.replicate_user_profiles_to) > 0: + cur_batchnum = ( + await self.store.get_latest_profile_replication_batch_number() + ) + new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1 + else: + new_batchnum = None + + await self.store.set_profile_avatar_url( + target_user.localpart, new_avatar_url, new_batchnum + ) if self.hs.config.user_directory_search_all_users: profile = await self.store.get_profileinfo(target_user.localpart) @@ -260,6 +451,23 @@ class BaseProfileHandler(BaseHandler): await self._update_join_states(requester, target_user) + # start a profile replication push + run_in_background(self._replicate_profiles) + + def _validate_and_parse_media_id_from_avatar_url(self, mxc): + """Validate and parse a provided avatar url and return the local media id + + Args: + mxc (str): A mxc URL + + Returns: + str: The ID of the media + """ + avatar_pieces = mxc.split("/") + if len(avatar_pieces) != 4 or avatar_pieces[0] != "mxc:": + raise SynapseError(400, "Invalid avatar URL '%s' supplied" % mxc) + return avatar_pieces[-1] + async def on_profile_query(self, args): user = UserID.from_string(args["user_id"]) if not self.hs.is_mine(user): diff --git a/synapse/handlers/receipts.py 
b/synapse/handlers/receipts.py
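Profile replication in profile.py pushes each batch to the configured hosts as a signed JSON body. The sketch below builds such a payload with signedjson; the signing key, host and batch contents are invented, while the field names and URL follow _replicate_host_profile_batch above.

from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

signing_key = generate_signing_key("a_key_id")   # normally hs.config.signing_key[0]
replication_host = "profiles.example.com"        # an entry of replicate_user_profiles_to

body = {
    "batchnum": 42,
    "batch": {
        # Active profiles are sent as a dict of profile fields...
        "@alice:example.com": {
            "display_name": "Alice",
            "avatar_url": "mxc://example.com/abc123",
        },
        # ...while hidden or deactivated profiles are replicated as None.
        "@bob:example.com": None,
    },
    "origin_server": "example.com",
}
signed_body = sign_json(body, "example.com", signing_key)
url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (replication_host,)
# signed_body now carries a "signatures" section and is POSTed to `url`
# via SimpleHttpClient.post_json_get_json.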
index 2cc6c2eb68..bdd8e52edd 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -142,18 +142,3 @@ class ReceiptEventSource: def get_current_key(self, direction="f"): return self.store.get_max_receipt_stream_id() - - async def get_pagination_rows(self, user, config, key): - to_key = int(config.from_key) - - if config.to_key: - from_key = int(config.to_key) - else: - from_key = None - - room_ids = await self.store.get_rooms_for_user(user.to_string()) - events = await self.store.get_linearized_receipts_for_rooms( - room_ids, from_key=from_key, to_key=to_key - ) - - return (events, to_key) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index cde2dbca92..83baddb4fc 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -48,6 +48,7 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self.http_client = hs.get_simple_http_client() self.identity_handler = self.hs.get_handlers().identity_handler self.ratelimiter = hs.get_registration_ratelimiter() self.macaroon_gen = hs.get_macaroon_generator() @@ -55,6 +56,8 @@ class RegistrationHandler(BaseHandler): self.spam_checker = hs.get_spam_checker() + self._show_in_user_directory = self.hs.config.show_users_in_user_directory + if hs.config.worker_app: self._register_client = ReplicationRegisterServlet.make_client(hs) self._register_device_client = RegisterDeviceReplicationServlet.make_client( @@ -70,8 +73,18 @@ class RegistrationHandler(BaseHandler): self.session_lifetime = hs.config.session_lifetime async def check_username( - self, localpart, guest_access_token=None, assigned_user_id=None + self, localpart, guest_access_token=None, assigned_user_id=None, ): + """ + + Args: + localpart (str|None): The user's localpart + guest_access_token (str|None): A guest's access token + assigned_user_id (str|None): An existing User ID for this user if pre-calculated + + Returns: + Deferred + """ if types.contains_invalid_mxid_characters(localpart): raise SynapseError( 400, @@ -114,6 +127,8 @@ class RegistrationHandler(BaseHandler): raise SynapseError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE ) + + # Retrieve guest user information from provided access token user_data = await self.auth.get_user_by_access_token(guest_access_token) if not user_data["is_guest"] or user_data["user"].localpart != localpart: raise AuthError( @@ -223,6 +238,11 @@ class RegistrationHandler(BaseHandler): shadow_banned=shadow_banned, ) + if default_display_name: + await self.profile_handler.set_displayname( + user, None, default_display_name, by_admin=True + ) + if self.hs.config.user_directory_search_all_users: profile = await self.store.get_profileinfo(localpart) await self.user_directory_handler.handle_local_profile_change( @@ -254,6 +274,10 @@ class RegistrationHandler(BaseHandler): shadow_banned=shadow_banned, ) + await self.profile_handler.set_displayname( + user, None, default_display_name, by_admin=True + ) + # Successfully registered break except SynapseError: @@ -287,7 +311,15 @@ class RegistrationHandler(BaseHandler): } # Bind email to new account - await self._register_email_threepid(user_id, threepid_dict, None) + await self.register_email_threepid(user_id, threepid_dict, None) + + # Prevent the new user from showing up in the user directory if the server + # mandates it. 
+ if not self._show_in_user_directory: + await self.store.add_account_data_for_user( + user_id, "im.vector.hide_profile", {"hide_profile": True} + ) + await self.profile_handler.set_active([user], False, True) return user_id @@ -481,7 +513,10 @@ class RegistrationHandler(BaseHandler): """ await self._auto_join_rooms(user_id) - async def appservice_register(self, user_localpart, as_token): + async def appservice_register( + self, user_localpart, as_token, password_hash, display_name + ): + # FIXME: this should be factored out and merged with normal register() user = UserID(user_localpart, self.hs.hostname) user_id = user.to_string() service = self.store.get_app_service_by_token(as_token) @@ -498,12 +533,25 @@ class RegistrationHandler(BaseHandler): self.check_user_id_not_appservice_exclusive(user_id, allowed_appservice=service) + display_name = display_name or user.localpart + await self.register_with_store( user_id=user_id, - password_hash="", + password_hash=password_hash, appservice_id=service_id, - create_profile_with_displayname=user.localpart, + create_profile_with_displayname=display_name, + ) + + await self.profile_handler.set_displayname( + user, None, display_name, by_admin=True ) + + if self.hs.config.user_directory_search_all_users: + profile = await self.store.get_profileinfo(user_localpart) + await self.user_directory_handler.handle_local_profile_change( + user_id, profile + ) + return user_id def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None): @@ -530,6 +578,49 @@ class RegistrationHandler(BaseHandler): errcode=Codes.EXCLUSIVE, ) + async def shadow_register(self, localpart, display_name, auth_result, params): + """Invokes the current registration on another server, using + shared secret registration, passing in any auth_results from + other registration UI auth flows (e.g. validated 3pids) + Useful for setting up shadow/backup accounts on a parallel deployment. 
+ """ + + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + await self.http_client.post_json_get_json( + "%s/_matrix/client/r0/register?access_token=%s" % (shadow_hs_url, as_token), + { + # XXX: auth_result is an unspecified extension for shadow registration + "auth_result": auth_result, + # XXX: another unspecified extension for shadow registration to ensure + # that the displayname is correctly set by the masters erver + "display_name": display_name, + "username": localpart, + "password": params.get("password"), + "bind_msisdn": params.get("bind_msisdn"), + "device_id": params.get("device_id"), + "initial_device_display_name": params.get( + "initial_device_display_name" + ), + "inhibit_login": False, + "access_token": as_token, + }, + ) + + async def _generate_user_id(self): + if self._next_generated_user_id is None: + with await self._generate_user_id_linearizer.queue(()): + if self._next_generated_user_id is None: + self._next_generated_user_id = ( + await self.store.find_next_generated_user_id_localpart() + ) + + id = self._next_generated_user_id + self._next_generated_user_id += 1 + return str(id) + def check_registration_ratelimit(self, address): """A simple helper method to check whether the registration rate limit has been hit for a given IP address @@ -675,6 +766,7 @@ class RegistrationHandler(BaseHandler): if auth_result and LoginType.EMAIL_IDENTITY in auth_result: threepid = auth_result[LoginType.EMAIL_IDENTITY] + # Necessary due to auth checks prior to the threepid being # written to the db if is_threepid_reserved( @@ -682,7 +774,32 @@ class RegistrationHandler(BaseHandler): ): await self.store.upsert_monthly_active_user(user_id) - await self._register_email_threepid(user_id, threepid, access_token) + await self.register_email_threepid(user_id, threepid, access_token) + + if self.hs.config.bind_new_user_emails_to_sydent: + # Attempt to call Sydent's internal bind API on the given identity server + # to bind this threepid + id_server_url = self.hs.config.bind_new_user_emails_to_sydent + + logger.debug( + "Attempting the bind email of %s to identity server: %s using " + "internal Sydent bind API.", + user_id, + self.hs.config.bind_new_user_emails_to_sydent, + ) + + try: + await self.identity_handler.bind_email_using_internal_sydent_api( + id_server_url, threepid["address"], user_id + ) + except Exception as e: + logger.warning( + "Failed to bind email of '%s' to Sydent instance '%s' ", + "using Sydent internal bind API: %s", + user_id, + id_server_url, + e, + ) if auth_result and LoginType.MSISDN in auth_result: threepid = auth_result[LoginType.MSISDN] @@ -703,7 +820,7 @@ class RegistrationHandler(BaseHandler): await self.store.user_set_consent_version(user_id, consent_version) await self.post_consent_actions(user_id) - async def _register_email_threepid(self, user_id, threepid, token): + async def register_email_threepid(self, user_id, threepid, token): """Add an email address as a 3pid identifier Also adds an email pusher for the email address, if configured in the diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index a29305f655..ca47306d39 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -358,7 +358,19 @@ class RoomCreationHandler(BaseHandler): """ user_id = requester.user.to_string() - if not self.spam_checker.user_may_create_room(user_id): + if ( + self._server_notices_mxid is not None + and requester.user.to_string() == self._server_notices_mxid + ): + # allow the server notices mxid to create rooms + is_requester_admin = True + + else: + is_requester_admin = await self.auth.is_server_admin(requester.user) + + if not is_requester_admin and not self.spam_checker.user_may_create_room( + user_id, invite_list=[], third_party_invite_list=[], cloning=True + ): raise SynapseError(403, "You are not permitted to create rooms") creation_content = { @@ -608,8 +620,14 @@ class RoomCreationHandler(BaseHandler): 403, "You are not permitted to create rooms", Codes.FORBIDDEN ) + invite_list = config.get("invite", []) + invite_3pid_list = config.get("invite_3pid", []) + if not is_requester_admin and not self.spam_checker.user_may_create_room( - user_id + user_id, + invite_list=invite_list, + third_party_invite_list=invite_3pid_list, + cloning=False, ): raise SynapseError(403, "You are not permitted to create rooms") @@ -681,6 +699,15 @@ class RoomCreationHandler(BaseHandler): creator_id=user_id, is_public=is_public, room_version=room_version, ) + # Check whether this visibility value is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Room visibility value not allowed.") + directory_handler = self.hs.get_handlers().directory_handler if room_alias: await directory_handler.create_association( @@ -777,6 +804,7 @@ class RoomCreationHandler(BaseHandler): "invite", ratelimit=False, content=content, + new_room=True, ) for invite_3pid in invite_3pid_list: @@ -794,6 +822,7 @@ class RoomCreationHandler(BaseHandler): id_server, requester, txn_id=None, + new_room=True, id_access_token=id_access_token, ) @@ -868,6 +897,7 @@ class RoomCreationHandler(BaseHandler): "join", ratelimit=False, content=creator_join_profile, + new_room=True, ) # We treat the power levels override specially as this needs to be one diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
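Room creation, and further down invites and joins, now pass extra context to the spam checker. A hedged sketch of a spam-checker module exposing the extended hooks; the hook names and arguments are taken from the call sites in this diff, while the policies and config key are invented.

class RoomPolicySpamChecker:
    def __init__(self, config, api=None):
        self._max_initial_invites = config.get("max_initial_invites", 20)

    def user_may_create_room(
        self, user_id, invite_list, third_party_invite_list, cloning
    ):
        # Refuse creation of rooms that immediately invite a large number of
        # users; `cloning` is True when an existing room is being recreated.
        total_invites = len(invite_list) + len(third_party_invite_list)
        return cloning or total_invites <= self._max_initial_invites

    def user_may_invite(
        self, inviter_userid, invitee_userid, third_party_invite,
        room_id, new_room, published_room,
    ):
        # Example policy: no invites into rooms already published to the
        # public directory, unless the room was only just created.
        return new_room or not published_room

    def user_may_join_room(self, userid, room_id, is_invited):
        # Example policy: only allow joins that follow an invite.
        return is_invited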
index 32b7e323fa..a5efabece9 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -40,7 +40,7 @@ from synapse.events.validator import EventValidator from synapse.storage.roommember import RoomsForUser from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID from synapse.util.async_helpers import Linearizer -from synapse.util.distributor import user_joined_room, user_left_room +from synapse.util.distributor import user_left_room from ._base import BaseHandler @@ -72,6 +72,7 @@ class RoomMemberHandler: self.registration_handler = hs.get_registration_handler() self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.identity_handler = hs.get_handlers().identity_handler self.member_linearizer = Linearizer(name="member") @@ -149,17 +150,6 @@ class RoomMemberHandler: raise NotImplementedError() @abc.abstractmethod - async def _user_joined_room(self, target: UserID, room_id: str) -> None: - """Notifies distributor on master process that the user has joined the - room. - - Args: - target - room_id - """ - raise NotImplementedError() - - @abc.abstractmethod async def _user_left_room(self, target: UserID, room_id: str) -> None: """Notifies distributor on master process that the user has left the room. @@ -221,7 +211,6 @@ class RoomMemberHandler: prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) - newly_joined = False if event.membership == Membership.JOIN: newly_joined = True if prev_member_event_id: @@ -246,12 +235,7 @@ class RoomMemberHandler: requester, event, context, extra_users=[target], ratelimit=ratelimit, ) - if event.membership == Membership.JOIN and newly_joined: - # Only fire user_joined_room if the user has actually joined the - # room. Don't bother if the user is just changing their profile - # info. - await self._user_joined_room(target, room_id) - elif event.membership == Membership.LEAVE: + if event.membership == Membership.LEAVE: if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) if prev_member_event.membership == Membership.JOIN: @@ -306,6 +290,7 @@ class RoomMemberHandler: third_party_signed: Optional[dict] = None, ratelimit: bool = True, content: Optional[dict] = None, + new_room: bool = False, require_consent: bool = True, ) -> Tuple[str, int]: """Update a user's membership in a room. @@ -346,6 +331,7 @@ class RoomMemberHandler: third_party_signed=third_party_signed, ratelimit=ratelimit, content=content, + new_room=new_room, require_consent=require_consent, ) @@ -362,6 +348,7 @@ class RoomMemberHandler: third_party_signed: Optional[dict] = None, ratelimit: bool = True, content: Optional[dict] = None, + new_room: bool = False, require_consent: bool = True, ) -> Tuple[str, int]: content_specified = bool(content) @@ -426,8 +413,15 @@ class RoomMemberHandler: ) block_invite = True + is_published = await self.store.is_room_published(room_id) + if not self.spam_checker.user_may_invite( - requester.user.to_string(), target.to_string(), room_id + requester.user.to_string(), + target.to_string(), + third_party_invite=None, + room_id=room_id, + new_room=new_room, + published_room=is_published, ): logger.info("Blocking invite due to spam checker") block_invite = True @@ -505,6 +499,25 @@ class RoomMemberHandler: # so don't really fit into the general auth process. 
raise AuthError(403, "Guest access not allowed") + if ( + self._server_notices_mxid is not None + and requester.user.to_string() == self._server_notices_mxid + ): + # allow the server notices mxid to join rooms + is_requester_admin = True + + else: + is_requester_admin = await self.auth.is_server_admin(requester.user) + + inviter = await self._get_inviter(target.to_string(), room_id) + if not is_requester_admin: + # We assume that if the spam checker allowed the user to create + # a room then they're allowed to join it. + if not new_room and not self.spam_checker.user_may_join_room( + target.to_string(), room_id, is_invited=inviter is not None + ): + raise SynapseError(403, "Not allowed to join this room") + if not is_host_in_room: time_now_s = self.clock.time() ( @@ -726,17 +739,7 @@ class RoomMemberHandler: (EventTypes.Member, event.state_key), None ) - if event.membership == Membership.JOIN: - # Only fire user_joined_room if the user has actually joined the - # room. Don't bother if the user is just changing their profile - # info. - newly_joined = True - if prev_member_event_id: - prev_member_event = await self.store.get_event(prev_member_event_id) - newly_joined = prev_member_event.membership != Membership.JOIN - if newly_joined: - await self._user_joined_room(target_user, room_id) - elif event.membership == Membership.LEAVE: + if event.membership == Membership.LEAVE: if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) if prev_member_event.membership == Membership.JOIN: @@ -807,6 +810,7 @@ class RoomMemberHandler: id_server: str, requester: Requester, txn_id: Optional[str], + new_room: bool = False, id_access_token: Optional[str] = None, ) -> int: """Invite a 3PID to a room. @@ -854,6 +858,16 @@ class RoomMemberHandler: Codes.FORBIDDEN, ) + can_invite = await self.third_party_event_rules.check_threepid_can_be_invited( + medium, address, room_id + ) + if not can_invite: + raise SynapseError( + 403, + "This third-party identifier can not be invited in this room", + Codes.FORBIDDEN, + ) + if not self._enable_lookup: raise SynapseError( 403, "Looking up third-party identifiers is denied from this server" @@ -863,6 +877,19 @@ class RoomMemberHandler: id_server, medium, address, id_access_token ) + is_published = await self.store.is_room_published(room_id) + + if not self.spam_checker.user_may_invite( + requester.user.to_string(), + invitee, + third_party_invite={"medium": medium, "address": address}, + room_id=room_id, + new_room=new_room, + published_room=is_published, + ): + logger.info("Blocking invite due to spam checker") + raise SynapseError(403, "Invites have been disabled on this server") + if invitee: # Note that update_membership with an action of "invite" can raise # a ShadowBanError, but this was done above already. @@ -1002,10 +1029,9 @@ class RoomMemberHandler: class RoomMemberMasterHandler(RoomMemberHandler): def __init__(self, hs): - super(RoomMemberMasterHandler, self).__init__(hs) + super().__init__(hs) self.distributor = hs.get_distributor() - self.distributor.declare("user_joined_room") self.distributor.declare("user_left_room") async def _is_remote_room_too_complex( @@ -1085,7 +1111,6 @@ class RoomMemberMasterHandler(RoomMemberHandler): event_id, stream_id = await self.federation_handler.do_invite_join( remote_room_hosts, room_id, user.to_string(), content ) - await self._user_joined_room(user, room_id) # Check the room we just joined wasn't too large, if we didn't fetch the # complexity of it before. 
@@ -1228,11 +1253,6 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) return event.event_id, stream_id - async def _user_joined_room(self, target: UserID, room_id: str) -> None: - """Implements RoomMemberHandler._user_joined_room - """ - user_joined_room(self.distributor, target, room_id) - async def _user_left_room(self, target: UserID, room_id: str) -> None: """Implements RoomMemberHandler._user_left_room """ diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py
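Note on the synapse/handlers/room_member.py hunks above: a `new_room` flag is threaded through `update_membership`, `user_may_invite` gains the third-party invite and a `published_room` flag, and a new `user_may_join_room` check is consulted. A rough sketch of callbacks with those signatures, assuming only what the call sites show; the blocking policy itself is invented.

```python
from typing import Optional


class ExampleMembershipChecker:
    """Hypothetical module implementing the membership callbacks used above."""

    def user_may_invite(
        self,
        inviter_userid: str,
        invitee_userid: Optional[str],
        third_party_invite: Optional[dict],
        room_id: str,
        new_room: bool,
        published_room: bool,
    ) -> bool:
        # Invented policy: never allow 3PID invites into published rooms.
        if published_room and third_party_invite is not None:
            return False
        return True

    def user_may_join_room(self, userid: str, room_id: str, is_invited: bool) -> bool:
        # Invented policy: only invited users may join.
        return is_invited
```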
index 897338fd54..e7f34737c6 100644
--- a/synapse/handlers/room_member_worker.py
+++ b/synapse/handlers/room_member_worker.py
@@ -57,8 +57,6 @@ class RoomMemberWorkerHandler(RoomMemberHandler): content=content, ) - await self._user_joined_room(user, room_id) - return ret["event_id"], ret["stream_id"] async def remote_reject_invite( @@ -81,13 +79,6 @@ class RoomMemberWorkerHandler(RoomMemberHandler): ) return ret["event_id"], ret["stream_id"] - async def _user_joined_room(self, target: UserID, room_id: str) -> None: - """Implements RoomMemberHandler._user_joined_room - """ - await self._notify_change_client( - user_id=target.to_string(), room_id=room_id, change="joined" - ) - async def _user_left_room(self, target: UserID, room_id: str) -> None: """Implements RoomMemberHandler._user_left_room """ diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index 4d245b618b..5d34989f21 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index e2ddb628ff..cc47e8b62c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1310,12 +1310,11 @@ class SyncHandler: presence_source = self.event_sources.sources["presence"] since_token = sync_result_builder.since_token + presence_key = None + include_offline = False if since_token and not sync_result_builder.full_state: presence_key = since_token.presence_key include_offline = True - else: - presence_key = None - include_offline = False presence, presence_key = await presence_source.get_new_events( user=user, diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py
index fea774e2e5..becf66dd86 100644
--- a/synapse/logging/utils.py
+++ b/synapse/logging/utils.py
@@ -29,11 +29,11 @@ def _log_debug_as_f(f, msg, msg_args): lineno = f.__code__.co_firstlineno pathname = f.__code__.co_filename - record = logging.LogRecord( + record = logger.makeRecord( name=name, level=logging.DEBUG, - pathname=pathname, - lineno=lineno, + fn=pathname, + lno=lineno, msg=msg, args=msg_args, exc_info=None, diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index fcbd5378c4..16ca86b7bc 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -14,12 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import TYPE_CHECKING from twisted.internet import defer +from synapse.events import EventBase +from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.types import UserID +from synapse.types import JsonDict, UserID, create_requester + +if TYPE_CHECKING: + from synapse.server import HomeServer """ This package defines the 'stable' API which can be used by extension modules which @@ -31,6 +37,50 @@ __all__ = ["errors", "make_deferred_yieldable", "run_in_background", "ModuleApi" logger = logging.getLogger(__name__) +class PublicRoomListManager: + """Contains methods for adding to, removing from and querying whether a room + is in the public room list. + + Args: + hs: The Homeserver object + """ + + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + + async def room_is_in_public_room_list(self, room_id: str) -> bool: + """Checks whether a room is in the public room list. + + Args: + room_id: The ID of the room. + + Returns: + Whether the room is in the public room list. Returns False if the room does + not exist. + """ + room = await self._store.get_room(room_id) + if not room: + return False + + return room.get("is_public", False) + + async def add_room_to_public_room_list(self, room_id: str) -> None: + """Publishes a room to the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, True) + + async def remove_room_from_public_room_list(self, room_id: str) -> None: + """Removes a room from the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, False) + + class ModuleApi: """A proxy object that gets passed to various plugin modules so they can register new users etc if necessary. @@ -43,6 +93,9 @@ class ModuleApi: self._auth = hs.get_auth() self._auth_handler = auth_handler + self.http_client = hs.get_simple_http_client() # type: SimpleHttpClient + self.public_room_list_manager = PublicRoomListManager(hs) + def get_user_by_req(self, req, allow_guest=False): """Check the access_token provided for a request @@ -266,3 +319,30 @@ class ModuleApi: await self._auth_handler.complete_sso_login( registered_user_id, request, client_redirect_url, ) + + async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBase: + """Create and send an event into a room. Membership events are currently not supported. + + Args: + event_dict: A dictionary representing the event to send. + Required keys are `type`, `room_id`, `sender` and `content`. + + Returns: + The event that was sent. If state event deduplication happened, then + the previous, duplicate event instead. + + Raises: + SynapseError if the event was not allowed. + """ + # Create a requester object + requester = create_requester(event_dict["sender"]) + + # Create and send the event + ( + event, + _, + ) = await self._hs.get_event_creation_handler().create_and_send_nonmember_event( + requester, event_dict, ratelimit=False + ) + + return event diff --git a/synapse/notifier.py b/synapse/notifier.py
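Note on the synapse/module_api/__init__.py hunk above: modules now get an HTTP client, a `PublicRoomListManager` and a `create_and_send_event_into_room` helper. A sketch of a plugin using those three additions; the module class and its config are hypothetical, only the ModuleApi attributes and methods introduced in the diff are assumed.

```python
class RoomPublisherModule:
    """Hypothetical plugin built on the ModuleApi additions above."""

    def __init__(self, config: dict, api):
        self._api = api
        self._room_id = config.get("room_id", "!example:example.org")

    async def publish_and_announce(self) -> None:
        # Publish the room if it is not already on the public room list.
        if not await self._api.public_room_list_manager.room_is_in_public_room_list(
            self._room_id
        ):
            await self._api.public_room_list_manager.add_room_to_public_room_list(
                self._room_id
            )

        # Send a notice into the room (membership events are not supported
        # by create_and_send_event_into_room).
        await self._api.create_and_send_event_into_room(
            {
                "type": "m.room.message",
                "room_id": self._room_id,
                "sender": "@bot:example.org",
                "content": {"msgtype": "m.notice", "body": "This room is now public."},
            }
        )
```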
index b7f4041306..71f2370874 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -432,8 +432,9 @@ class Notifier: If explicit_room_id is set, that room will be polled for events only if it is world readable or the user has joined the room. """ - from_token = pagination_config.from_token - if not from_token: + if pagination_config.from_token: + from_token = pagination_config.from_token + else: from_token = self.event_sources.get_current_token() limit = pagination_config.limit diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index 8047873ff1..172af1a5a4 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -482,7 +482,11 @@ BASE_APPEND_UNDERRIDE_RULES = [ "_id": "_message", } ], - "actions": ["notify", {"set_tweak": "highlight", "value": False}], + "actions": [ + "notify", + {"set_tweak": "sound", "value": "default"}, + {"set_tweak": "highlight", "value": False}, + ], }, # XXX: this is going to fire for events which aren't m.room.messages # but are encrypted (e.g. m.call.*)... @@ -496,7 +500,11 @@ BASE_APPEND_UNDERRIDE_RULES = [ "_id": "_encrypted", } ], - "actions": ["notify", {"set_tweak": "highlight", "value": False}], + "actions": [ + "notify", + {"set_tweak": "sound", "value": "default"}, + {"set_tweak": "highlight", "value": False}, + ], }, ] diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index b7ea4438e0..28bd8ab748 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -91,7 +91,7 @@ class EmailPusher: pass self.timed_call = None - def on_new_notifications(self, min_stream_ordering, max_stream_ordering): + def on_new_notifications(self, max_stream_ordering): if self.max_stream_ordering: self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index f21fa9b659..26706bf3e1 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -114,7 +114,7 @@ class HttpPusher: if should_check_for_notifs: self._start_processing() - def on_new_notifications(self, min_stream_ordering, max_stream_ordering): + def on_new_notifications(self, max_stream_ordering): self.max_stream_ordering = max( max_stream_ordering, self.max_stream_ordering or 0 ) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 6c57854018..455a1acb46 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -123,7 +123,7 @@ class Mailer:
         params = {"token": token, "client_secret": client_secret, "sid": sid}
         link = (
             self.hs.config.public_baseurl
-            + "_matrix/client/unstable/password_reset/email/submit_token?%s"
+            + "_synapse/client/password_reset/email/submit_token?%s"
             % urllib.parse.urlencode(params)
         )
 
diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index 3c3262a88c..fa8473bf8d 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -64,6 +64,12 @@ class PusherPool: self._pusher_shard_config = hs.config.push.pusher_shard_config self._instance_name = hs.get_instance_name() + # Record the last stream ID that we were poked about so we can get + # changes since then. We set this to the current max stream ID on + # startup as every individual pusher will have checked for changes on + # startup. + self._last_room_stream_id_seen = self.store.get_room_max_stream_ordering() + # map from user id to app_id:pushkey to pusher self.pushers = {} # type: Dict[str, Dict[str, Union[HttpPusher, EmailPusher]]] @@ -178,20 +184,27 @@ class PusherPool: ) await self.remove_pusher(p["app_id"], p["pushkey"], p["user_name"]) - async def on_new_notifications(self, min_stream_id, max_stream_id): + async def on_new_notifications(self, max_stream_id): if not self.pushers: # nothing to do here. return + if max_stream_id < self._last_room_stream_id_seen: + # Nothing to do + return + + prev_stream_id = self._last_room_stream_id_seen + self._last_room_stream_id_seen = max_stream_id + try: users_affected = await self.store.get_push_action_users_in_range( - min_stream_id, max_stream_id + prev_stream_id, max_stream_id ) for u in users_affected: if u in self.pushers: for p in self.pushers[u].values(): - p.on_new_notifications(min_stream_id, max_stream_id) + p.on_new_notifications(max_stream_id) except Exception: logger.exception("Exception in pusher on_new_notifications") diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
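Note on the synapse/push/pusherpool.py hunks above: `on_new_notifications` now takes only the maximum stream ID, and the pool remembers the last ID it was poked about so it can process the `(prev, max]` window itself and ignore stale pokes. The same bookkeeping in isolation (not Synapse code), with a tiny usage check:

```python
class StreamTracker:
    """Remembers the last stream ID processed, as in the pusher pool change above."""

    def __init__(self, current_max: int):
        # Start at the current maximum: everything older was already handled
        # by the individual pushers when they started up.
        self._last_seen = current_max

    def advance(self, max_stream_id: int):
        """Return the (prev, new] window to process, or None if nothing new."""
        if max_stream_id < self._last_seen:
            return None
        prev, self._last_seen = self._last_seen, max_stream_id
        return prev, max_stream_id


tracker = StreamTracker(current_max=100)
assert tracker.advance(99) is None          # stale poke: nothing to do
assert tracker.advance(105) == (100, 105)   # process rows in (100, 105]
assert tracker.advance(110) == (105, 110)
```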
index 2d995ec456..ff0c67228b 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -43,7 +43,7 @@ REQUIREMENTS = [
     "jsonschema>=2.5.1",
     "frozendict>=1",
     "unpaddedbase64>=1.1.0",
-    "canonicaljson>=1.3.0",
+    "canonicaljson>=1.4.0",
     # we use the type definitions added in signedjson 1.1.
     "signedjson>=1.1.0",
     "pynacl>=1.2.1",
diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py
index 741329ab5f..08095fdf7d 100644
--- a/synapse/replication/http/membership.py
+++ b/synapse/replication/http/membership.py
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Optional from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict, Requester, UserID -from synapse.util.distributor import user_joined_room, user_left_room +from synapse.util.distributor import user_left_room if TYPE_CHECKING: from synapse.server import HomeServer @@ -181,9 +181,9 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): Args: room_id (str) user_id (str) - change (str): Either "joined" or "left" + change (str): "left" """ - assert change in ("joined", "left") + assert change == "left" return {} @@ -192,9 +192,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): user = UserID.from_string(user_id) - if change == "joined": - user_joined_room(self.distributor, user, room_id) - elif change == "left": + if change == "left": user_left_room(self.distributor, user, room_id) else: raise Exception("Unrecognized change: %r", change) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index d6ecf5b327..ccd3147dfd 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -154,7 +154,8 @@ class ReplicationDataHandler: max_token = self.store.get_room_max_stream_ordering() self.notifier.on_new_room_event(event, token, max_token, extra_users) - await self.pusher_pool.on_new_notifications(token, token) + max_token = self.store.get_room_max_stream_ordering() + await self.pusher_pool.on_new_notifications(max_token) # Notify any waiting deferreds. The list is ordered by position so we # just iterate through the list until we reach a position that is diff --git a/synapse/res/templates/password_reset_confirmation.html b/synapse/res/templates/password_reset_confirmation.html new file mode 100644
index 0000000000..def4b5162b
--- /dev/null
+++ b/synapse/res/templates/password_reset_confirmation.html
@@ -0,0 +1,16 @@ +<html> +<head></head> +<body> +<!--Use a hidden form to resubmit the information necessary to reset the password--> +<form method="post"> + <input type="hidden" name="sid" value="{{ sid }}"> + <input type="hidden" name="token" value="{{ token }}"> + <input type="hidden" name="client_secret" value="{{ client_secret }}"> + + <p>You have requested to <strong>reset your Matrix account password</strong>. Click the link below to confirm this action. <br /><br /> + If you did not mean to do this, please close this page and your password will not be changed.</p> + <p><button type="submit">Confirm changing my password</button></p> +</form> +</body> +</html> + diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
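Note on the new synapse/res/templates/password_reset_confirmation.html above: the page re-submits the `sid`, `token` and `client_secret` via a hidden POST form once the user confirms. Synapse's res/templates are Jinja2 templates, so a quick way to preview the rendered page looks roughly like this (the loader path and values are illustrative):

```python
from jinja2 import Environment, FileSystemLoader

# Load the template from a checkout of the repository (path is illustrative).
env = Environment(loader=FileSystemLoader("synapse/res/templates"), autoescape=True)
template = env.get_template("password_reset_confirmation.html")

# Render with placeholder values for the three hidden form fields.
html = template.render(
    sid="1",
    token="example-token",
    client_secret="example-client-secret",
)
print(html)
```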
index 87f927890c..5d9cdf4bde 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -13,8 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import synapse.rest.admin from synapse.http.server import JsonResource +from synapse.rest import admin from synapse.rest.client import versions from synapse.rest.client.v1 import ( directory, @@ -119,13 +119,12 @@ class ClientRestResource(JsonResource): room_upgrade_rest_servlet.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) + password_policy.register_servlets(hs, client_resource) relations.register_servlets(hs, client_resource) password_policy.register_servlets(hs, client_resource) # moving to /_synapse/admin - synapse.rest.admin.register_servlets_for_client_rest_resource( - hs, client_resource - ) + admin.register_servlets_for_client_rest_resource(hs, client_resource) # unstable shared_rooms.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 970fdd5834..ceaa28c212 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -49,9 +49,7 @@ class PresenceStatusRestServlet(RestServlet): raise AuthError(403, "You are not allowed to see their presence.") state = await self.presence_handler.get_state(target_user=user) - state = format_user_presence_state( - state, self.clock.time_msec(), include_user_id=False - ) + state = format_user_presence_state(state, self.clock.time_msec()) return 200, state diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index e7fe50ed72..165313b572 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -14,6 +14,7 @@ # limitations under the License. """ This module contains REST servlets to do with profile: /profile/<paths> """ +from twisted.internet import defer from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -28,6 +29,7 @@ class ProfileDisplaynameRestServlet(RestServlet): super(ProfileDisplaynameRestServlet, self).__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() + self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() async def on_GET(self, request, user_id): @@ -63,11 +65,27 @@ class ProfileDisplaynameRestServlet(RestServlet): await self.profile_handler.set_displayname(user, requester, new_name, is_admin) + if self.hs.config.shadow_server: + shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs")) + self.shadow_displayname(shadow_user.to_string(), content) + return 200, {} def on_OPTIONS(self, request, user_id): return 200, {} + @defer.inlineCallbacks + def shadow_displayname(self, user_id, body): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.put_json( + "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s" + % (shadow_hs_url, user_id, as_token, user_id), + body, + ) + class ProfileAvatarURLRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True) @@ -76,6 +94,7 @@ class ProfileAvatarURLRestServlet(RestServlet): super(ProfileAvatarURLRestServlet, self).__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() + self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() async def on_GET(self, request, user_id): @@ -114,11 +133,27 @@ class ProfileAvatarURLRestServlet(RestServlet): user, requester, new_avatar_url, is_admin ) + if self.hs.config.shadow_server: + shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs")) + self.shadow_avatar_url(shadow_user.to_string(), content) + return 200, {} def on_OPTIONS(self, request, user_id): return 200, {} + @defer.inlineCallbacks + def shadow_avatar_url(self, user_id, body): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.put_json( + "%s/_matrix/client/r0/profile/%s/avatar_url?access_token=%s&user_id=%s" + % (shadow_hs_url, user_id, as_token, user_id), + body, + ) + class ProfileRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
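Note on the synapse/rest/client/v1/profile.py hunks above: display name and avatar changes are replicated to a configured shadow homeserver, and the servlet calls `shadow_displayname`/`shadow_avatar_url` without awaiting them, so the replication PUT does not block the client response. With placeholder configuration values, the request the helper builds looks like this:

```python
# Placeholder values standing in for the shadow_server config options.
shadow_hs_url = "https://shadow.example.org"   # shadow_server "hs_url"
as_token = "AS_TOKEN"                          # shadow_server "as_token"
user_id = "@alice:shadow.example.org"          # shadowed user on that server

url = (
    "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s"
    % (shadow_hs_url, user_id, as_token, user_id)
)
body = {"displayname": "Alice"}

# The helper then PUTs `body` to `url` via SimpleHttpClient.put_json.
print(url)
```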
index e781a3bcf4..ddf8ed5e9c 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -163,6 +163,18 @@ class PushRuleRestServlet(RestServlet): self.notifier.on_new_event("push_rules_key", stream_id, users=[user_id]) async def set_rule_attr(self, user_id, spec, val): + if spec["attr"] not in ("enabled", "actions"): + # for the sake of potential future expansion, shouldn't report + # 404 in the case of an unknown request so check it corresponds to + # a known attribute first. + raise UnrecognizedRequestError() + + namespaced_rule_id = _namespaced_rule_id_from_spec(spec) + rule_id = spec["rule_id"] + is_default_rule = rule_id.startswith(".") + if is_default_rule: + if namespaced_rule_id not in BASE_RULE_IDS: + raise NotFoundError("Unknown rule %s" % (namespaced_rule_id,)) if spec["attr"] == "enabled": if isinstance(val, dict) and "enabled" in val: val = val["enabled"] @@ -171,9 +183,8 @@ class PushRuleRestServlet(RestServlet): # This should *actually* take a dict, but many clients pass # bools directly, so let's not break them. raise SynapseError(400, "Value for 'enabled' must be boolean") - namespaced_rule_id = _namespaced_rule_id_from_spec(spec) return await self.store.set_push_rule_enabled( - user_id, namespaced_rule_id, val + user_id, namespaced_rule_id, val, is_default_rule ) elif spec["attr"] == "actions": actions = val.get("actions") diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 84baf3d59b..3929d5519d 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -739,7 +739,8 @@ class RoomMembershipRestServlet(TransactionRestServlet): content["id_server"], requester, txn_id, - content.get("id_access_token"), + new_room=False, + id_access_token=content.get("id_access_token"), ) except ShadowBanError: # Pretend the request succeeded. diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 3481477731..4fe8f99c95 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018, 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,13 @@ # limitations under the License. import logging import random +import re from http import HTTPStatus +from typing import TYPE_CHECKING +from urllib.parse import urlparse + +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer from synapse.api.constants import LoginType from synapse.api.errors import ( @@ -34,6 +40,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.push.mailer import Mailer +from synapse.types import UserID from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string from synapse.util.threepids import canonicalise_email, check_3pid_allowed @@ -94,10 +101,14 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", email): raise SynapseError( 403, - "Your email domain is not authorized on this server", + "Your email is not authorized on this server", Codes.THREEPID_DENIED, ) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) + # The email will be sent to the stored address. # This avoids a potential account hijack by requesting a password reset to # an email address which is controlled by the attacker but which, after @@ -144,81 +155,6 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): return 200, ret -class PasswordResetSubmitTokenServlet(RestServlet): - """Handles 3PID validation token submission""" - - PATTERNS = client_patterns( - "/password_reset/(?P<medium>[^/]*)/submit_token$", releases=(), unstable=True - ) - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): server - """ - super(PasswordResetSubmitTokenServlet, self).__init__() - self.hs = hs - self.auth = hs.get_auth() - self.config = hs.config - self.clock = hs.get_clock() - self.store = hs.get_datastore() - if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: - self._failure_email_template = ( - self.config.email_password_reset_template_failure_html - ) - - async def on_GET(self, request, medium): - # We currently only handle threepid token submissions for email - if medium != "email": - raise SynapseError( - 400, "This medium is currently not supported for password resets" - ) - if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF: - if self.config.local_threepid_handling_disabled_due_to_email_config: - logger.warning( - "Password reset emails have been disabled due to lack of an email config" - ) - raise SynapseError( - 400, "Email-based password resets are disabled on this server" - ) - - sid = parse_string(request, "sid", required=True) - token = parse_string(request, "token", required=True) - client_secret = parse_string(request, "client_secret", required=True) - assert_valid_client_secret(client_secret) - - # Attempt to validate a 3PID session - try: - # Mark the session as valid - next_link = await self.store.validate_threepid_session( - sid, client_secret, token, self.clock.time_msec() - ) - - # Perform a 302 redirect if next_link is set - if next_link: - if next_link.startswith("file:///"): - logger.warning( - "Not redirecting to next_link as it is a local file: address" - ) - else: - 
request.setResponseCode(302) - request.setHeader("Location", next_link) - finish_request(request) - return None - - # Otherwise show the success template - html = self.config.email_password_reset_template_success_html_content - status_code = 200 - except ThreepidValidationError as e: - status_code = e.code - - # Show a failure page with a reason - template_vars = {"failure_reason": e.msg} - html = self._failure_email_template.render(**template_vars) - - respond_with_html(request, status_code, html) - - class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") @@ -230,6 +166,7 @@ class PasswordRestServlet(RestServlet): self.datastore = self.hs.get_datastore() self.password_policy_handler = hs.get_password_policy_handler() self._set_password_handler = hs.get_set_password_handler() + self.http_client = hs.get_simple_http_client() @interactive_auth_handler async def on_POST(self, request): @@ -255,26 +192,33 @@ class PasswordRestServlet(RestServlet): if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req(request) - try: - params, session_id = await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body, - self.hs.get_ip_from_request(request), - "modify your account password", - ) - except InteractiveAuthIncompleteError as e: - # The user needs to provide more steps to complete auth, but - # they're not required to provide the password again. - # - # If a password is available now, hash the provided password and - # store it for later. - if new_password: - password_hash = await self.auth_handler.hash(new_password) - await self.auth_handler.set_session_data( - e.session_id, "password_hash", password_hash + # blindly trust ASes without UI-authing them + if requester.app_service: + params = body + else: + try: + ( + params, + session_id, + ) = await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body, + self.hs.get_ip_from_request(request), + "modify your account password", ) - raise + except InteractiveAuthIncompleteError as e: + # The user needs to provide more steps to complete auth, but + # they're not required to provide the password again. + # + # If a password is available now, hash the provided password and + # store it for later. 
+ if new_password: + password_hash = await self.auth_handler.hash(new_password) + await self.auth_handler.set_session_data( + e.session_id, "password_hash", password_hash + ) + raise user_id = requester.user.to_string() else: requester = None @@ -339,11 +283,28 @@ class PasswordRestServlet(RestServlet): user_id, password_hash, logout_devices, requester ) + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + await self.shadow_password(params, shadow_user.to_string()) + return 200, {} def on_OPTIONS(self, _): return 200, {} + async def shadow_password(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + await self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/password?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_patterns("/account/deactivate$") @@ -439,13 +400,17 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", email): + if not (await check_3pid_allowed(self.hs, "email", email)): raise SynapseError( 403, - "Your email domain is not authorized on this server", + "Your email is not authorized on this server", Codes.THREEPID_DENIED, ) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) + existing_user_id = await self.store.get_user_id_by_threepid("email", email) if existing_user_id is not None: @@ -510,13 +475,17 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): msisdn = phone_number_to_msisdn(country, phone_number) - if not check_3pid_allowed(self.hs, "msisdn", msisdn): + if not (await check_3pid_allowed(self.hs, "msisdn", msisdn)): raise SynapseError( 403, "Account phone numbers are not authorized on this server", Codes.THREEPID_DENIED, ) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) + existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) if existing_user_id is not None: @@ -603,15 +572,10 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet): # Perform a 302 redirect if next_link is set if next_link: - if next_link.startswith("file:///"): - logger.warning( - "Not redirecting to next_link as it is a local file: address" - ) - else: - request.setResponseCode(302) - request.setHeader("Location", next_link) - finish_request(request) - return None + request.setResponseCode(302) + request.setHeader("Location", next_link) + finish_request(request) + return None # Otherwise show the success template html = self.config.email_add_threepid_template_success_html_content @@ -677,7 +641,8 @@ class ThreepidRestServlet(RestServlet): self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() - self.datastore = self.hs.get_datastore() + self.datastore = hs.get_datastore() + self.http_client = hs.get_simple_http_client() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) @@ -696,6 +661,29 @@ class ThreepidRestServlet(RestServlet): user_id = requester.user.to_string() body = parse_json_object_from_request(request) + # skip validation if this is a shadow 3PID from an AS + if requester.app_service: + 
# XXX: ASes pass in a validated threepid directly to bypass the IS. + # This makes the API entirely change shape when we have an AS token; + # it really should be an entirely separate API - perhaps + # /account/3pid/replicate or something. + threepid = body.get("threepid") + + await self.auth_handler.add_threepid( + user_id, + threepid["medium"], + threepid["address"], + threepid["validated_at"], + ) + + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + await self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) + + return 200, {} + threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds") if threepid_creds is None: raise SynapseError( @@ -717,12 +705,35 @@ class ThreepidRestServlet(RestServlet): validation_session["address"], validation_session["validated_at"], ) + + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + threepid = { + "medium": validation_session["medium"], + "address": validation_session["address"], + "validated_at": validation_session["validated_at"], + } + await self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) + return 200, {} raise SynapseError( 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED ) + async def shadow_3pid(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + await self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + class ThreepidAddRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/add$") @@ -733,6 +744,7 @@ class ThreepidAddRestServlet(RestServlet): self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() + self.http_client = hs.get_simple_http_client() @interactive_auth_handler async def on_POST(self, request): @@ -768,12 +780,33 @@ class ThreepidAddRestServlet(RestServlet): validation_session["address"], validation_session["validated_at"], ) + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + threepid = { + "medium": validation_session["medium"], + "address": validation_session["address"], + "validated_at": validation_session["validated_at"], + } + await self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) return 200, {} raise SynapseError( 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED ) + async def shadow_3pid(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + await self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + class ThreepidBindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/bind$") @@ -843,6 +876,7 @@ class ThreepidDeleteRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() + self.http_client = hs.get_simple_http_client() async def on_POST(self, request): if not self.hs.config.enable_3pid_changes: @@ -867,6 +901,12 @@ class ThreepidDeleteRestServlet(RestServlet): logger.exception("Failed to remove threepid") raise SynapseError(500, 
"Failed to remove threepid") + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + await self.shadow_3pid_delete(body, shadow_user.to_string()) + if ret: id_server_unbind_result = "success" else: @@ -874,6 +914,113 @@ class ThreepidDeleteRestServlet(RestServlet): return 200, {"id_server_unbind_result": id_server_unbind_result} + async def shadow_3pid_delete(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + await self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/3pid/delete?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + + +class ThreepidLookupRestServlet(RestServlet): + PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/lookup$")] + + def __init__(self, hs): + super(ThreepidLookupRestServlet, self).__init__() + self.auth = hs.get_auth() + self.identity_handler = hs.get_handlers().identity_handler + + async def on_GET(self, request): + """Proxy a /_matrix/identity/api/v1/lookup request to an identity + server + """ + await self.auth.get_user_by_req(request) + + # Verify query parameters + query_params = request.args + assert_params_in_dict(query_params, [b"medium", b"address", b"id_server"]) + + # Retrieve needed information from query parameters + medium = parse_string(request, "medium") + address = parse_string(request, "address") + id_server = parse_string(request, "id_server") + + # Proxy the request to the identity server. lookup_3pid handles checking + # if the lookup is allowed so we don't need to do it here. + ret = await self.identity_handler.proxy_lookup_3pid(id_server, medium, address) + + return 200, ret + + +class ThreepidBulkLookupRestServlet(RestServlet): + PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/bulk_lookup$")] + + def __init__(self, hs): + super(ThreepidBulkLookupRestServlet, self).__init__() + self.auth = hs.get_auth() + self.identity_handler = hs.get_handlers().identity_handler + + async def on_POST(self, request): + """Proxy a /_matrix/identity/api/v1/bulk_lookup request to an identity + server + """ + await self.auth.get_user_by_req(request) + + body = parse_json_object_from_request(request) + + assert_params_in_dict(body, ["threepids", "id_server"]) + + # Proxy the request to the identity server. lookup_3pid handles checking + # if the lookup is allowed so we don't need to do it here. 
+ ret = await self.identity_handler.proxy_bulk_lookup_3pid( + body["id_server"], body["threepids"] + ) + + return 200, ret + + +def assert_valid_next_link(hs: "HomeServer", next_link: str): + """ + Raises a SynapseError if a given next_link value is invalid + + next_link is valid if the scheme is http(s) and the next_link.domain_whitelist config + option is either empty or contains a domain that matches the one in the given next_link + + Args: + hs: The homeserver object + next_link: The next_link value given by the client + + Raises: + SynapseError: If the next_link is invalid + """ + valid = True + + # Parse the contents of the URL + next_link_parsed = urlparse(next_link) + + # Scheme must not point to the local drive + if next_link_parsed.scheme == "file": + valid = False + + # If the domain whitelist is set, the domain must be in it + if ( + valid + and hs.config.next_link_domain_whitelist is not None + and next_link_parsed.hostname not in hs.config.next_link_domain_whitelist + ): + valid = False + + if not valid: + raise SynapseError( + 400, + "'next_link' domain not included in whitelist, or not http(s)", + errcode=Codes.INVALID_PARAM, + ) + class WhoamiRestServlet(RestServlet): PATTERNS = client_patterns("/account/whoami$") @@ -890,7 +1037,6 @@ class WhoamiRestServlet(RestServlet): def register_servlets(hs, http_server): EmailPasswordRequestTokenRestServlet(hs).register(http_server) - PasswordResetSubmitTokenServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) EmailThreepidRequestTokenRestServlet(hs).register(http_server) @@ -902,4 +1048,6 @@ def register_servlets(hs, http_server): ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) + ThreepidLookupRestServlet(hs).register(http_server) + ThreepidBulkLookupRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
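Note on the new `assert_valid_next_link` helper in synapse/rest/client/v2_alpha/account.py above: it rejects `file:` URLs and, when `next_link_domain_whitelist` is configured, any hostname outside that list. The same two checks, extracted into a stand-alone function so the rules can be exercised without a homeserver (whitelist values are made up):

```python
from typing import Iterable, Optional
from urllib.parse import urlparse


def next_link_is_valid(next_link: str, whitelist: Optional[Iterable[str]]) -> bool:
    """Mirror of the checks in assert_valid_next_link, returning a bool."""
    parsed = urlparse(next_link)

    # Scheme must not point to the local drive.
    if parsed.scheme == "file":
        return False

    # If the domain whitelist is set, the hostname must be in it.
    if whitelist is not None and parsed.hostname not in whitelist:
        return False

    return True


assert not next_link_is_valid("file:///etc/passwd", None)
assert next_link_is_valid("https://client.example.org/done", None)
assert not next_link_is_valid("https://evil.test/done", ["client.example.org"])
assert next_link_is_valid("https://client.example.org/done", ["client.example.org"])
```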
index c1d4cd0caf..d31ec7c29d 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -17,6 +17,7 @@ import logging from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.types import UserID from ._base import client_patterns @@ -39,6 +40,7 @@ class AccountDataServlet(RestServlet): self.store = hs.get_datastore() self.notifier = hs.get_notifier() self._is_worker = hs.config.worker_app is not None + self._profile_handler = hs.get_profile_handler() async def on_PUT(self, request, user_id, account_data_type): if self._is_worker: @@ -50,6 +52,11 @@ class AccountDataServlet(RestServlet): body = parse_json_object_from_request(request) + if account_data_type == "im.vector.hide_profile": + user = UserID.from_string(user_id) + hide_profile = body.get("hide_profile") + await self._profile_handler.set_active([user], not hide_profile, True) + max_id = await self.store.add_account_data_for_user( user_id, account_data_type, body ) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index b6b90a8b30..d9424709dc 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015 - 2016 OpenMarket Ltd -# Copyright 2017 Vector Creations Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +18,7 @@ import hmac import logging import random +import re from typing import List, Union import synapse @@ -117,10 +119,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", email): + if not (await check_3pid_allowed(self.hs, "email", body["email"])): raise SynapseError( 403, - "Your email domain is not authorized to register on this server", + "Your email is not authorized to register on this server", Codes.THREEPID_DENIED, ) @@ -192,7 +194,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): msisdn = phone_number_to_msisdn(country, phone_number) - if not check_3pid_allowed(self.hs, "msisdn", msisdn): + assert_valid_client_secret(body["client_secret"]) + + if not (await check_3pid_allowed(self.hs, "msisdn", msisdn)): raise SynapseError( 403, "Phone numbers are not authorized to register on this server", @@ -344,15 +348,9 @@ class UsernameAvailabilityRestServlet(RestServlet): 403, "Registration has been disabled", errcode=Codes.FORBIDDEN ) - ip = self.hs.get_ip_from_request(request) - with self.ratelimiter.ratelimit(ip) as wait_deferred: - await wait_deferred - - username = parse_string(request, "username", required=True) - - await self.registration_handler.check_username(username) - - return 200, {"available": True} + # We are not interested in logging in via a username in this deployment. + # Simply allow anything here as it won't be used later. + return 200, {"available": True} class RegisterRestServlet(RestServlet): @@ -402,18 +400,27 @@ class RegisterRestServlet(RestServlet): "Do not understand membership kind: %s" % (kind.decode("utf8"),) ) - # Pull out the provided username and do basic sanity checks early since - # the auth layer will store these in sessions. + # We don't care about usernames for this deployment. In fact, the act + # of checking whether they exist already can leak metadata about + # which users are already registered. + # + # Usernames are already derived via the provided email. + # So, if they're not necessary, just ignore them. + # + # (we do still allow appservices to set them below) desired_username = None - if "username" in body: - if not isinstance(body["username"], str) or len(body["username"]) > 512: - raise SynapseError(400, "Invalid username") - desired_username = body["username"] + + desired_display_name = body.get("display_name") appservice = None if self.auth.has_access_token(request): appservice = self.auth.get_appservice_by_req(request) + # We need to retrieve the password early in order to pass it to + # application service registration + # This is specific to shadow server registration of users via an AS + password = body.pop("password", None) + # fork off as soon as possible for ASes which have completely # different registration flows to normal users @@ -422,7 +429,7 @@ class RegisterRestServlet(RestServlet): # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. 
- desired_username = body.get("user", desired_username) + desired_username = body.get("user", body.get("username")) # XXX we should check that desired_username is valid. Currently # we give appservices carte blanche for any insanity in mxids, @@ -433,7 +440,7 @@ class RegisterRestServlet(RestServlet): if isinstance(desired_username, str): result = await self._do_appservice_registration( - desired_username, access_token, body + desired_username, password, desired_display_name, access_token, body ) return 200, result # we throw for non 200 responses @@ -441,16 +448,6 @@ class RegisterRestServlet(RestServlet): if not self._registration_enabled: raise SynapseError(403, "Registration has been disabled") - # For regular registration, convert the provided username to lowercase - # before attempting to register it. This should mean that people who try - # to register with upper-case in their usernames don't get a nasty surprise. - # - # Note that we treat usernames case-insensitively in login, so they are - # free to carry on imagining that their username is CrAzYh4cKeR if that - # keeps them happy. - if desired_username is not None: - desired_username = desired_username.lower() - # Check if this account is upgrading from a guest account. guest_access_token = body.get("guest_access_token", None) @@ -459,7 +456,6 @@ class RegisterRestServlet(RestServlet): # Note that we remove the password from the body since the auth layer # will store the body in the session and we don't want a plaintext # password store there. - password = body.pop("password", None) if password is not None: if not isinstance(password, str) or len(password) > 512: raise SynapseError(400, "Invalid password") @@ -489,14 +485,6 @@ class RegisterRestServlet(RestServlet): session_id, "password_hash", None ) - # Ensure that the username is valid. - if desired_username is not None: - await self.registration_handler.check_username( - desired_username, - guest_access_token=guest_access_token, - assigned_user_id=registered_user_id, - ) - # Check if the user-interactive authentication flows are complete, if # not this will raise a user-interactive auth error. try: @@ -535,7 +523,7 @@ class RegisterRestServlet(RestServlet): medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] - if not check_3pid_allowed(self.hs, medium, address): + if not (await check_3pid_allowed(self.hs, medium, address)): raise SynapseError( 403, "Third party identifiers (email/phone numbers)" @@ -543,6 +531,80 @@ class RegisterRestServlet(RestServlet): Codes.THREEPID_DENIED, ) + existingUid = await self.store.get_user_id_by_threepid( + medium, address + ) + + if existingUid is not None: + raise SynapseError( + 400, "%s is already in use" % medium, Codes.THREEPID_IN_USE + ) + + if self.hs.config.register_mxid_from_3pid: + # override the desired_username based on the 3PID if any. + # reset it first to avoid folks picking their own username. + desired_username = None + + # we should have an auth_result at this point if we're going to progress + # to register the user (i.e. 
we haven't picked up a registered_user_id + # from our session store), in which case get ready and gen the + # desired_username + if auth_result: + if ( + self.hs.config.register_mxid_from_3pid == "email" + and LoginType.EMAIL_IDENTITY in auth_result + ): + address = auth_result[LoginType.EMAIL_IDENTITY]["address"] + desired_username = synapse.types.strip_invalid_mxid_characters( + address.replace("@", "-").lower() + ) + + # find a unique mxid for the account, suffixing numbers + # if needed + while True: + try: + await self.registration_handler.check_username( + desired_username, + guest_access_token=guest_access_token, + assigned_user_id=registered_user_id, + ) + # if we got this far we passed the check. + break + except SynapseError as e: + if e.errcode == Codes.USER_IN_USE: + m = re.match(r"^(.*?)(\d+)$", desired_username) + if m: + desired_username = m.group(1) + str( + int(m.group(2)) + 1 + ) + else: + desired_username += "1" + else: + # something else went wrong. + break + + if self.hs.config.register_just_use_email_for_display_name: + desired_display_name = address + else: + # Custom mapping between email address and display name + desired_display_name = _map_email_to_displayname(address) + elif ( + self.hs.config.register_mxid_from_3pid == "msisdn" + and LoginType.MSISDN in auth_result + ): + desired_username = auth_result[LoginType.MSISDN]["address"] + else: + raise SynapseError( + 400, "Cannot derive mxid from 3pid; no recognised 3pid" + ) + + if desired_username is not None: + await self.registration_handler.check_username( + desired_username, + guest_access_token=guest_access_token, + assigned_user_id=registered_user_id, + ) + if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", registered_user_id @@ -557,7 +619,12 @@ class RegisterRestServlet(RestServlet): if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = params.get("username", None) + if not self.hs.config.register_mxid_from_3pid: + desired_username = params.get("username", None) + else: + # we keep the original desired_username derived from the 3pid above + pass + guest_access_token = params.get("guest_access_token", None) if desired_username is not None: @@ -606,6 +673,7 @@ class RegisterRestServlet(RestServlet): localpart=desired_username, password_hash=password_hash, guest_access_token=guest_access_token, + default_display_name=desired_display_name, threepid=threepid, address=client_addr, user_agent_ips=entries, @@ -618,6 +686,14 @@ class RegisterRestServlet(RestServlet): ): await self.store.upsert_monthly_active_user(registered_user_id) + if self.hs.config.shadow_server: + await self.registration_handler.shadow_register( + localpart=desired_username, + display_name=desired_display_name, + auth_result=auth_result, + params=params, + ) + # Remember that the user account has been registered (and the user # ID it was registered with, since it might not have been specified). await self.auth_handler.set_session_data( @@ -642,11 +718,38 @@ class RegisterRestServlet(RestServlet): def on_OPTIONS(self, _): return 200, {} - async def _do_appservice_registration(self, username, as_token, body): + async def _do_appservice_registration( + self, username, password, display_name, as_token, body + ): + # FIXME: appservice_register() is horribly duplicated with register() + # and they should probably just be combined together with a config flag. 
+ + if password: + # Hash the password + # + # In mainline hashing of the password was moved further on in the registration + # flow, but we need it here for the AS use-case of shadow servers + password = await self.auth_handler.hash(password) + user_id = await self.registration_handler.appservice_register( - username, as_token + username, as_token, password, display_name ) - return await self._create_registration_details(user_id, body) + result = await self._create_registration_details(user_id, body) + + auth_result = body.get("auth_result") + if auth_result and LoginType.EMAIL_IDENTITY in auth_result: + threepid = auth_result[LoginType.EMAIL_IDENTITY] + await self.registration_handler.register_email_threepid( + user_id, threepid, result["access_token"] + ) + + if auth_result and LoginType.MSISDN in auth_result: + threepid = auth_result[LoginType.MSISDN] + await self.registration_handler.register_msisdn_threepid( + user_id, threepid, result["access_token"] + ) + + return result async def _create_registration_details(self, user_id, params): """Complete registration of newly-registered user @@ -697,6 +800,60 @@ class RegisterRestServlet(RestServlet): ) +def cap(name): + """Capitalise parts of a name containing different words, including those + separated by hyphens. + For example, 'John-Doe' + + Args: + name (str): The name to parse + """ + if not name: + return name + + # Split the name by whitespace then hyphens, capitalizing each part then + # joining it back together. + capatilized_name = " ".join( + "-".join(part.capitalize() for part in space_part.split("-")) + for space_part in name.split() + ) + return capatilized_name + + +def _map_email_to_displayname(address): + """Custom mapping from an email address to a user displayname + + Args: + address (str): The email address to process + Returns: + str: The new displayname + """ + # Split the part before and after the @ in the email. + # Replace all . with spaces in the first part + parts = address.replace(".", " ").split("@") + + # Figure out which org this email address belongs to + org_parts = parts[1].split(" ") + + # If this is a ...matrix.org email, mark them as an Admin + if org_parts[-2] == "matrix" and org_parts[-1] == "org": + org = "Tchap Admin" + + # Is this is a ...gouv.fr address, set the org to whatever is before + # gouv.fr. If there isn't anything (a @gouv.fr email) simply mark their + # org as "gouv" + elif org_parts[-2] == "gouv" and org_parts[-1] == "fr": + org = org_parts[-3] if len(org_parts) > 2 else org_parts[-2] + + # Otherwise, mark their org as the email's second-level domain name + else: + org = org_parts[-2] + + desired_display_name = cap(parts[0]) + " [" + cap(org) + "]" + + return desired_display_name + + def _calculate_registration_flows( # technically `config` has to provide *all* of these interfaces, not just one config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig], diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
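Note on the `cap` and `_map_email_to_displayname` helpers added to synapse/rest/client/v2_alpha/register.py above, some worked examples of the mapping they define (all addresses are made up): "john.doe@gouv.fr" becomes "John Doe [Gouv]", "jane.smith@dgfip.gouv.fr" becomes "Jane Smith [Dgfip]", "admin@matrix.org" becomes "Admin [Tchap Admin]", and "bob@example.com" becomes "Bob [Example]".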
index bef91a2d3e..6e8300d6a5 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -14,9 +14,17 @@ # limitations under the License. import logging +from typing import Dict -from synapse.api.errors import SynapseError -from synapse.http.servlet import RestServlet, parse_json_object_from_request +from signedjson.sign import sign_json + +from synapse.api.errors import Codes, SynapseError +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) +from synapse.types import UserID from ._base import client_patterns @@ -35,6 +43,7 @@ class UserDirectorySearchRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() + self.http_client = hs.get_simple_http_client() async def on_POST(self, request): """Searches for users in directory @@ -61,6 +70,16 @@ class UserDirectorySearchRestServlet(RestServlet): body = parse_json_object_from_request(request) + if self.hs.config.user_directory_defer_to_id_server: + signed_body = sign_json( + body, self.hs.hostname, self.hs.config.signing_key[0] + ) + url = "%s/_matrix/identity/api/v1/user_directory/search" % ( + self.hs.config.user_directory_defer_to_id_server, + ) + resp = await self.http_client.post_json_get_json(url, signed_body) + return 200, resp + limit = body.get("limit", 10) limit = min(limit, 50) @@ -76,5 +95,125 @@ class UserDirectorySearchRestServlet(RestServlet): return 200, results +class SingleUserInfoServlet(RestServlet): + """ + Deprecated and replaced by `/users/info` + + GET /user/{user_id}/info HTTP/1.1 + """ + + PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/info$") + + def __init__(self, hs): + super(SingleUserInfoServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.transport_layer = hs.get_federation_transport_client() + registry = hs.get_federation_registry() + + if not registry.query_handlers.get("user_info"): + registry.register_query_handler("user_info", self._on_federation_query) + + async def on_GET(self, request, user_id): + # Ensure the user is authenticated + await self.auth.get_user_by_req(request) + + user = UserID.from_string(user_id) + if not self.hs.is_mine(user): + # Attempt to make a federation request to the server that owns this user + args = {"user_id": user_id} + res = await self.transport_layer.make_query( + user.domain, "user_info", args, retry_on_dns_fail=True + ) + return 200, res + + user_id_to_info = await self.store.get_info_for_users([user_id]) + return 200, user_id_to_info[user_id] + + async def _on_federation_query(self, args): + """Called when a request for user information appears over federation + + Args: + args (dict): Dictionary of query arguments provided by the request + + Returns: + Deferred[dict]: Deactivation and expiration information for a given user + """ + user_id = args.get("user_id") + if not user_id: + raise SynapseError(400, "user_id not provided") + + user = UserID.from_string(user_id) + if not self.hs.is_mine(user): + raise SynapseError(400, "User is not hosted on this homeserver") + + user_ids_to_info_dict = await self.store.get_info_for_users([user_id]) + return user_ids_to_info_dict[user_id] + + +class UserInfoServlet(RestServlet): + """Bulk version of `/user/{user_id}/info` endpoint + + GET /users/info HTTP/1.1 + + Returns a dictionary of user_id to info dictionary. 
Supports remote users + """ + + PATTERNS = client_patterns("/users/info$", unstable=True, releases=()) + + def __init__(self, hs): + super(UserInfoServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.transport_layer = hs.get_federation_transport_client() + + async def on_POST(self, request): + # Ensure the user is authenticated + await self.auth.get_user_by_req(request) + + # Extract the user_ids from the request + body = parse_json_object_from_request(request) + assert_params_in_dict(body, required=["user_ids"]) + + user_ids = body["user_ids"] + if not isinstance(user_ids, list): + raise SynapseError( + 400, + "'user_ids' must be a list of user ID strings", + errcode=Codes.INVALID_PARAM, + ) + + # Separate local and remote users + local_user_ids = set() + remote_server_to_user_ids = {} # type: Dict[str, set] + for user_id in user_ids: + user = UserID.from_string(user_id) + + if self.hs.is_mine(user): + local_user_ids.add(user_id) + else: + remote_server_to_user_ids.setdefault(user.domain, set()) + remote_server_to_user_ids[user.domain].add(user_id) + + # Retrieve info of all local users + user_id_to_info_dict = await self.store.get_info_for_users(local_user_ids) + + # Request info of each remote user from their remote homeserver + for server_name, user_id_set in remote_server_to_user_ids.items(): + # Make a request to the given server about their own users + res = await self.transport_layer.get_info_of_users( + server_name, list(user_id_set) + ) + + for user_id, info in res: + user_id_to_info_dict[user_id] = info + + return 200, user_id_to_info_dict + + def register_servlets(hs, http_server): UserDirectorySearchRestServlet(hs).register(http_server) + SingleUserInfoServlet(hs).register(http_server) + UserInfoServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
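The new SingleUserInfoServlet and UserInfoServlet above expose expiration and deactivation data for local and remote users. A hypothetical client-side call against the bulk endpoint; the homeserver URL, access token and user IDs are placeholders, and the path assumes the unstable client prefix implied by client_patterns:

    import requests

    resp = requests.post(
        "https://homeserver.example/_matrix/client/unstable/users/info",
        headers={"Authorization": "Bearer <access_token>"},
        json={"user_ids": ["@alice:homeserver.example", "@bob:remote.example"]},
    )
    # Expected shape, per get_info_for_users: a map of user_id to
    # {"expired": bool, "deactivated": bool}; remote users are resolved by
    # querying their own homeserver over federation.
    print(resp.json())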
index 24ac57f35d..58ec5a694a 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -57,9 +57,12 @@ class VersionsRestServlet(RestServlet): # MSC2326. "org.matrix.label_based_filtering": True, # Implements support for cross signing as described in MSC1756 - "org.matrix.e2e_cross_signing": True, + # "org.matrix.e2e_cross_signing": True, # Implements additional endpoints as described in MSC2432 "org.matrix.msc2432": True, + # Tchap does not currently assume this rule for r0.5.0 + # XXX: Remove this when it does + "m.lazy_load_members": True, # Implements additional endpoints as described in MSC2666 "uk.half-shot.msc2666": True, }, diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index d2826374a7..7447eeaebe 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -80,7 +80,7 @@ class MediaFilePaths: self, server_name, file_id, width, height, content_type, method ): top_level_type, sub_type = content_type.split("/") - file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) + file_name = "%i-%i-%s-%s-%s" % (width, height, top_level_type, sub_type, method) return os.path.join( "remote_thumbnail", server_name, @@ -92,6 +92,23 @@ class MediaFilePaths: remote_media_thumbnail = _wrap_in_base_path(remote_media_thumbnail_rel) + # Legacy path that was used to store thumbnails previously. + # Should be removed after some time, when most of the thumbnails are stored + # using the new path. + def remote_media_thumbnail_rel_legacy( + self, server_name, file_id, width, height, content_type + ): + top_level_type, sub_type = content_type.split("/") + file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type) + return os.path.join( + "remote_thumbnail", + server_name, + file_id[0:2], + file_id[2:4], + file_id[4:], + file_name, + ) + def remote_media_thumbnail_dir(self, server_name, file_id): return os.path.join( self.base_path, diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
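The filepath.py change above appends the thumbnailing method to remote thumbnail file names, while remote_media_thumbnail_rel_legacy keeps the old name so existing files stay readable. A standalone sketch of the two naming schemes; the server name and file_id are made up:

    import os

    def thumbnail_rel_path(server_name, file_id, width, height, content_type, method=None):
        top_level_type, sub_type = content_type.split("/")
        file_name = "%i-%i-%s-%s" % (width, height, top_level_type, sub_type)
        if method is not None:
            # New scheme: include the method so "crop" and "scale" thumbnails
            # of the same size no longer collide.
            file_name += "-%s" % (method,)
        return os.path.join(
            "remote_thumbnail", server_name, file_id[0:2], file_id[2:4], file_id[4:], file_name
        )

    print(thumbnail_rel_path("remote.example", "abcdefghij", 320, 240, "image/jpeg"))          # legacy name
    print(thumbnail_rel_path("remote.example", "abcdefghij", 320, 240, "image/jpeg", "crop"))  # new name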
index 9a1b7779f7..69f353d46f 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -53,7 +53,7 @@ from .media_storage import MediaStorage from .preview_url_resource import PreviewUrlResource from .storage_provider import StorageProviderWrapper from .thumbnail_resource import ThumbnailResource -from .thumbnailer import Thumbnailer +from .thumbnailer import Thumbnailer, ThumbnailError from .upload_resource import UploadResource logger = logging.getLogger(__name__) @@ -460,13 +460,30 @@ class MediaRepository: return t_byte_source async def generate_local_exact_thumbnail( - self, media_id, t_width, t_height, t_method, t_type, url_cache - ): + self, + media_id: str, + t_width: int, + t_height: int, + t_method: str, + t_type: str, + url_cache: str, + ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(None, media_id, url_cache=url_cache) ) - thumbnailer = Thumbnailer(input_path) + try: + thumbnailer = Thumbnailer(input_path) + except ThumbnailError as e: + logger.warning( + "Unable to generate a thumbnail for local media %s using a method of %s and type of %s: %s", + media_id, + t_method, + t_type, + e, + ) + return None + t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, @@ -506,14 +523,36 @@ class MediaRepository: return output_path + # Could not generate thumbnail. + return None + async def generate_remote_exact_thumbnail( - self, server_name, file_id, media_id, t_width, t_height, t_method, t_type - ): + self, + server_name: str, + file_id: str, + media_id: str, + t_width: int, + t_height: int, + t_method: str, + t_type: str, + ) -> Optional[str]: input_path = await self.media_storage.ensure_media_is_in_local_cache( FileInfo(server_name, file_id, url_cache=False) ) - thumbnailer = Thumbnailer(input_path) + try: + thumbnailer = Thumbnailer(input_path) + except ThumbnailError as e: + logger.warning( + "Unable to generate a thumbnail for remote media %s from %s using a method of %s and type of %s: %s", + media_id, + server_name, + t_method, + t_type, + e, + ) + return None + t_byte_source = await defer_to_thread( self.hs.get_reactor(), self._generate_thumbnail, @@ -559,6 +598,9 @@ class MediaRepository: return output_path + # Could not generate thumbnail. + return None + async def _generate_thumbnails( self, server_name: Optional[str], @@ -590,7 +632,18 @@ class MediaRepository: FileInfo(server_name, file_id, url_cache=url_cache) ) - thumbnailer = Thumbnailer(input_path) + try: + thumbnailer = Thumbnailer(input_path) + except ThumbnailError as e: + logger.warning( + "Unable to generate thumbnails for remote media %s from %s using a method of %s and type of %s: %s", + media_id, + server_name, + media_type, + e, + ) + return None + m_width = thumbnailer.width m_height = thumbnailer.height diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
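The media_repository.py changes above wrap Thumbnailer construction so that unreadable media produces a logged warning and a None return instead of an unhandled exception. A small sketch of the underlying pattern using Pillow directly rather than the Synapse classes:

    from io import BytesIO

    from PIL import Image

    def open_image_or_none(data: bytes):
        try:
            return Image.open(BytesIO(data))
        except OSError as e:
            # Corrupt or unsupported media: log and give up, mirroring the
            # "return None" paths added above.
            print("unable to thumbnail: %s" % (e,))
            return None

    print(open_image_or_none(b"definitely not an image"))  # -> None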
index 3a352b5631..5681677fc9 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -147,6 +147,20 @@ class MediaStorage: if os.path.exists(local_path): return FileResponder(open(local_path, "rb")) + # Fallback for paths without method names + # Should be removed in the future + if file_info.thumbnail and file_info.server_name: + legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail_width, + height=file_info.thumbnail_height, + content_type=file_info.thumbnail_type, + ) + legacy_local_path = os.path.join(self.local_media_directory, legacy_path) + if os.path.exists(legacy_local_path): + return FileResponder(open(legacy_local_path, "rb")) + for provider in self.storage_providers: res = await provider.fetch(path, file_info) # type: Any if res: @@ -170,6 +184,20 @@ class MediaStorage: if os.path.exists(local_path): return local_path + # Fallback for paths without method names + # Should be removed in the future + if file_info.thumbnail and file_info.server_name: + legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail_width, + height=file_info.thumbnail_height, + content_type=file_info.thumbnail_type, + ) + legacy_local_path = os.path.join(self.local_media_directory, legacy_path) + if os.path.exists(legacy_local_path): + return legacy_local_path + dirname = os.path.dirname(local_path) if not os.path.exists(dirname): os.makedirs(dirname) diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index a83535b97b..30421b663a 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -16,6 +16,7 @@ import logging +from synapse.api.errors import SynapseError from synapse.http.server import DirectServeJsonResource, set_cors_headers from synapse.http.servlet import parse_integer, parse_string @@ -173,7 +174,7 @@ class ThumbnailResource(DirectServeJsonResource): await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") - respond_404(request) + raise SynapseError(400, "Failed to generate thumbnail.") async def _select_or_generate_remote_thumbnail( self, @@ -235,7 +236,7 @@ class ThumbnailResource(DirectServeJsonResource): await respond_with_file(request, desired_type, file_path) else: logger.warning("Failed to generate thumbnail") - respond_404(request) + raise SynapseError(400, "Failed to generate thumbnail.") async def _respond_remote_thumbnail( self, request, server_name, media_id, width, height, method, m_type diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index d681bf7bf0..457ad6031c 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -15,7 +15,7 @@ import logging from io import BytesIO -from PIL import Image as Image +from PIL import Image logger = logging.getLogger(__name__) @@ -31,12 +31,22 @@ EXIF_TRANSPOSE_MAPPINGS = { } +class ThumbnailError(Exception): + """An error occurred generating a thumbnail.""" + + class Thumbnailer: FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"} def __init__(self, input_path): - self.image = Image.open(input_path) + try: + self.image = Image.open(input_path) + except OSError as e: + # If an error occurs opening the image, a thumbnail won't be able to + # be generated. + raise ThumbnailError from e + self.width, self.height = self.image.size self.transpose_method = None try: diff --git a/synapse/rest/synapse/__init__.py b/synapse/rest/synapse/__init__.py new file mode 100644
index 0000000000..c0b733488b
--- /dev/null
+++ b/synapse/rest/synapse/__init__.py
@@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py new file mode 100644
index 0000000000..c0b733488b
--- /dev/null
+++ b/synapse/rest/synapse/client/__init__.py
@@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py new file mode 100644
index 0000000000..9e4fbc0cbd
--- /dev/null
+++ b/synapse/rest/synapse/client/password_reset.py
@@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +from typing import TYPE_CHECKING, Tuple + +from twisted.web.http import Request + +from synapse.api.errors import ThreepidValidationError +from synapse.config.emailconfig import ThreepidBehaviour +from synapse.http.server import DirectServeHtmlResource +from synapse.http.servlet import parse_string +from synapse.util.stringutils import assert_valid_client_secret + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class PasswordResetSubmitTokenResource(DirectServeHtmlResource): + """Handles 3PID validation token submission + + This resource gets mounted under /_synapse/client/password_reset/email/submit_token + """ + + isLeaf = 1 + + def __init__(self, hs: "HomeServer"): + """ + Args: + hs: server + """ + super().__init__() + + self.clock = hs.get_clock() + self.store = hs.get_datastore() + + self._local_threepid_handling_disabled_due_to_email_config = ( + hs.config.local_threepid_handling_disabled_due_to_email_config + ) + self._confirmation_email_template = ( + hs.config.email_password_reset_template_confirmation_html + ) + self._email_password_reset_template_success_html = ( + hs.config.email_password_reset_template_success_html_content + ) + self._failure_email_template = ( + hs.config.email_password_reset_template_failure_html + ) + + # This resource should not be mounted if threepid behaviour is not LOCAL + assert hs.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL + + async def _async_render_GET(self, request: Request) -> Tuple[int, bytes]: + sid = parse_string(request, "sid", required=True) + token = parse_string(request, "token", required=True) + client_secret = parse_string(request, "client_secret", required=True) + assert_valid_client_secret(client_secret) + + # Show a confirmation page, just in case someone accidentally clicked this link when + # they didn't mean to + template_vars = { + "sid": sid, + "token": token, + "client_secret": client_secret, + } + return ( + 200, + self._confirmation_email_template.render(**template_vars).encode("utf-8"), + ) + + async def _async_render_POST(self, request: Request) -> Tuple[int, bytes]: + sid = parse_string(request, "sid", required=True) + token = parse_string(request, "token", required=True) + client_secret = parse_string(request, "client_secret", required=True) + + # Attempt to validate a 3PID session + try: + # Mark the session as valid + next_link = await self.store.validate_threepid_session( + sid, client_secret, token, self.clock.time_msec() + ) + + # Perform a 302 redirect if next_link is set + if next_link: + if next_link.startswith("file:///"): + logger.warning( + "Not redirecting to next_link as it is a local file: address" + ) + else: + next_link_bytes = next_link.encode("utf-8") + request.setHeader("Location", next_link_bytes) + return ( + 302, + ( + b'You are being redirected to <a src="%s">%s</a>.' 
+ % (next_link_bytes, next_link_bytes) + ), + ) + + # Otherwise show the success template + html_bytes = self._email_password_reset_template_success_html.encode( + "utf-8" + ) + status_code = 200 + except ThreepidValidationError as e: + status_code = e.code + + # Show a failure page with a reason + template_vars = {"failure_reason": e.msg} + html_bytes = self._failure_email_template.render(**template_vars).encode( + "utf-8" + ) + + return status_code, html_bytes diff --git a/synapse/rulecheck/__init__.py b/synapse/rulecheck/__init__.py new file mode 100644
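PasswordResetSubmitTokenResource above serves a confirmation page on GET, so that accidentally-followed links do not consume the token, and only validates the session on POST. A hypothetical walk-through of that flow; the host, session ID, token and client secret are placeholders:

    import requests

    url = "https://homeserver.example/_synapse/client/password_reset/email/submit_token"
    params = {"sid": "<sid>", "token": "<token from email>", "client_secret": "<client secret>"}

    # GET renders the confirmation template with the same parameters embedded.
    confirmation = requests.get(url, params=params)

    # POST marks the 3PID session as validated; expect 200 with the success
    # template, a 302 redirect to next_link, or the failure template carrying
    # the ThreepidValidationError status code.
    result = requests.post(url, data=params)
    print(result.status_code)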
index 0000000000..e69de29bb2
--- /dev/null
+++ b/synapse/rulecheck/__init__.py
diff --git a/synapse/rulecheck/domain_rule_checker.py b/synapse/rulecheck/domain_rule_checker.py
new file mode 100644
index 0000000000..6f2a1931c5
--- /dev/null
+++ b/synapse/rulecheck/domain_rule_checker.py
@@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.config._base import ConfigError + +logger = logging.getLogger(__name__) + + +class DomainRuleChecker(object): + """ + A re-implementation of the SpamChecker that prevents users in one domain from + inviting users in other domains to rooms, based on a configuration. + + Takes a config in the format: + + spam_checker: + module: "rulecheck.DomainRuleChecker" + config: + domain_mapping: + "inviter_domain": [ "invitee_domain_permitted", "other_domain_permitted" ] + "other_inviter_domain": [ "invitee_domain_permitted" ] + default: False + + # Only let local users join rooms if they were explicitly invited. + can_only_join_rooms_with_invite: false + + # Only let local users create rooms if they are inviting only one + # other user, and that user matches the rules above. + can_only_create_one_to_one_rooms: false + + # Only let local users invite during room creation, regardless of the + # domain mapping rules above. + can_only_invite_during_room_creation: false + + # Prevent local users from inviting users from certain domains to + # rooms published in the room directory. + domains_prevented_from_being_invited_to_published_rooms: [] + + # Allow third party invites + can_invite_by_third_party_id: true + + Don't forget to consider if you can invite users from your own domain. + """ + + def __init__(self, config): + self.domain_mapping = config["domain_mapping"] or {} + self.default = config["default"] + + self.can_only_join_rooms_with_invite = config.get( + "can_only_join_rooms_with_invite", False + ) + self.can_only_create_one_to_one_rooms = config.get( + "can_only_create_one_to_one_rooms", False + ) + self.can_only_invite_during_room_creation = config.get( + "can_only_invite_during_room_creation", False + ) + self.can_invite_by_third_party_id = config.get( + "can_invite_by_third_party_id", True + ) + self.domains_prevented_from_being_invited_to_published_rooms = config.get( + "domains_prevented_from_being_invited_to_published_rooms", [] + ) + + def check_event_for_spam(self, event): + """Implements synapse.events.SpamChecker.check_event_for_spam + """ + return False + + def user_may_invite( + self, + inviter_userid, + invitee_userid, + third_party_invite, + room_id, + new_room, + published_room=False, + ): + """Implements synapse.events.SpamChecker.user_may_invite + """ + if self.can_only_invite_during_room_creation and not new_room: + return False + + if not self.can_invite_by_third_party_id and third_party_invite: + return False + + # This is a third party invite (without a bound mxid), so unless we have + # banned all third party invites (above) we allow it. 
+ if not invitee_userid: + return True + + inviter_domain = self._get_domain_from_id(inviter_userid) + invitee_domain = self._get_domain_from_id(invitee_userid) + + if inviter_domain not in self.domain_mapping: + return self.default + + if ( + published_room + and invitee_domain + in self.domains_prevented_from_being_invited_to_published_rooms + ): + return False + + return invitee_domain in self.domain_mapping[inviter_domain] + + def user_may_create_room( + self, userid, invite_list, third_party_invite_list, cloning + ): + """Implements synapse.events.SpamChecker.user_may_create_room + """ + + if cloning: + return True + + if not self.can_invite_by_third_party_id and third_party_invite_list: + return False + + number_of_invites = len(invite_list) + len(third_party_invite_list) + + if self.can_only_create_one_to_one_rooms and number_of_invites != 1: + return False + + return True + + def user_may_create_room_alias(self, userid, room_alias): + """Implements synapse.events.SpamChecker.user_may_create_room_alias + """ + return True + + def user_may_publish_room(self, userid, room_id): + """Implements synapse.events.SpamChecker.user_may_publish_room + """ + return True + + def user_may_join_room(self, userid, room_id, is_invited): + """Implements synapse.events.SpamChecker.user_may_join_room + """ + if self.can_only_join_rooms_with_invite and not is_invited: + return False + + return True + + @staticmethod + def parse_config(config): + """Implements synapse.events.SpamChecker.parse_config + """ + if "default" in config: + return config + else: + raise ConfigError("No default set for spam_config DomainRuleChecker") + + @staticmethod + def _get_domain_from_id(mxid): + """Parses a string and returns the domain part of the mxid. + + Args: + mxid (str): a valid mxid + + Returns: + str: the domain part of the mxid + + """ + idx = mxid.find(":") + if idx == -1: + raise Exception("Invalid ID: %r" % (mxid,)) + return mxid[idx + 1 :] diff --git a/synapse/storage/database.py b/synapse/storage/database.py
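The DomainRuleChecker above gates invites on a per-domain mapping. A standalone re-implementation of just the domain_mapping lookup, with an invented config, to illustrate the expected outcomes (this is not the real class and skips the third-party and published-room cases):

    domain_mapping = {
        "inviter.example": ["invitee.example", "partner.example"],
    }
    default = False  # applied when the inviter's domain has no mapping

    def may_invite(inviter, invitee):
        inviter_domain = inviter.split(":", 1)[1]
        invitee_domain = invitee.split(":", 1)[1]
        if inviter_domain not in domain_mapping:
            return default
        return invitee_domain in domain_mapping[inviter_domain]

    assert may_invite("@a:inviter.example", "@b:invitee.example")
    assert not may_invite("@a:inviter.example", "@b:elsewhere.example")
    assert not may_invite("@a:unmapped.example", "@b:invitee.example")  # falls back to `default`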
index ed8a9bffb1..79ec8f119d 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -952,7 +952,7 @@ class DatabasePool: key_names: Collection[str], key_values: Collection[Iterable[Any]], value_names: Collection[str], - value_values: Iterable[Iterable[str]], + value_values: Iterable[Iterable[Any]], ) -> None: """ Upsert, many times. @@ -981,7 +981,7 @@ class DatabasePool: key_names: Iterable[str], key_values: Collection[Iterable[Any]], value_names: Collection[str], - value_values: Iterable[Iterable[str]], + value_values: Iterable[Iterable[Any]], ) -> None: """ Upsert, many times, but without native UPSERT support or batching. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index add4e3ea0e..306fc6947c 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -481,7 +481,7 @@ class DeviceWorkerStore(SQLBaseStore): } async def get_users_whose_devices_changed( - self, from_key: str, user_ids: Iterable[str] + self, from_key: int, user_ids: Iterable[str] ) -> Set[str]: """Get set of users whose devices have changed since `from_key` that are in the given list of user_ids. @@ -493,7 +493,6 @@ class DeviceWorkerStore(SQLBaseStore): Returns: The set of user_ids whose devices have changed since `from_key` """ - from_key = int(from_key) # Get set of users who *may* have changed. Users not in the returned # list have definitely not changed. @@ -527,7 +526,7 @@ class DeviceWorkerStore(SQLBaseStore): ) async def get_users_whose_signatures_changed( - self, user_id: str, from_key: str + self, user_id: str, from_key: int ) -> Set[str]: """Get the users who have new cross-signing signatures made by `user_id` since `from_key`. @@ -539,7 +538,7 @@ class DeviceWorkerStore(SQLBaseStore): Returns: A set of user IDs with updated signatures. """ - from_key = int(from_key) + if self._user_signature_stream_cache.has_entity_changed(user_id, from_key): sql = """ SELECT DISTINCT user_ids FROM user_signature_stream diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py
index 86557d5512..1d76c761a6 100644
--- a/synapse/storage/databases/main/media_repository.py
+++ b/synapse/storage/databases/main/media_repository.py
@@ -17,6 +17,10 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool +BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( + "media_repository_drop_index_wo_method" +) + class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): @@ -32,6 +36,59 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): where_clause="url_cache IS NOT NULL", ) + # The following the updates add the method to the unique constraint of + # the thumbnail databases. That fixes an issue, where thumbnails of the + # same resolution, but different methods could overwrite one another. + # This can happen with custom thumbnail configs or with dynamic thumbnailing. + self.db_pool.updates.register_background_index_update( + update_name="local_media_repository_thumbnails_method_idx", + index_name="local_media_repository_thumbn_media_id_width_height_method_key", + table="local_media_repository_thumbnails", + columns=[ + "media_id", + "thumbnail_width", + "thumbnail_height", + "thumbnail_type", + "thumbnail_method", + ], + unique=True, + ) + + self.db_pool.updates.register_background_index_update( + update_name="remote_media_repository_thumbnails_method_idx", + index_name="remote_media_repository_thumbn_media_origin_id_width_height_method_key", + table="remote_media_cache_thumbnails", + columns=[ + "media_origin", + "media_id", + "thumbnail_width", + "thumbnail_height", + "thumbnail_type", + "thumbnail_method", + ], + unique=True, + ) + + self.db_pool.updates.register_background_update_handler( + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD, + self._drop_media_index_without_method, + ) + + async def _drop_media_index_without_method(self, progress, batch_size): + def f(txn): + txn.execute( + "ALTER TABLE local_media_repository_thumbnails DROP CONSTRAINT IF EXISTS local_media_repository_thumbn_media_id_thumbnail_width_thum_key" + ) + txn.execute( + "ALTER TABLE remote_media_cache_thumbnails DROP CONSTRAINT IF EXISTS remote_media_repository_thumbn_media_id_thumbnail_width_thum_key" + ) + + await self.db_pool.runInteraction("drop_media_indices_without_method", f) + await self.db_pool.updates._end_background_update( + BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD + ) + return 1 + class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): """Persistence for attachments and avatars""" diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index d2e0685e9e..de37866d25 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +13,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional, Tuple from synapse.api.errors import StoreError from synapse.storage._base import SQLBaseStore from synapse.storage.databases.main.roommember import ProfileInfo +from synapse.types import UserID +from synapse.util.caches.descriptors import cached + +BATCH_SIZE = 100 class ProfileWorkerStore(SQLBaseStore): @@ -39,6 +44,7 @@ class ProfileWorkerStore(SQLBaseStore): avatar_url=profile["avatar_url"], display_name=profile["displayname"] ) + @cached(max_entries=5000) async def get_profile_displayname(self, user_localpart: str) -> str: return await self.db_pool.simple_select_one_onecol( table="profiles", @@ -47,6 +53,7 @@ class ProfileWorkerStore(SQLBaseStore): desc="get_profile_displayname", ) + @cached(max_entries=5000) async def get_profile_avatar_url(self, user_localpart: str) -> str: return await self.db_pool.simple_select_one_onecol( table="profiles", @@ -55,6 +62,58 @@ class ProfileWorkerStore(SQLBaseStore): desc="get_profile_avatar_url", ) + async def get_latest_profile_replication_batch_number(self): + def f(txn): + txn.execute("SELECT MAX(batch) as maxbatch FROM profiles") + rows = self.db_pool.cursor_to_dict(txn) + return rows[0]["maxbatch"] + + return await self.db_pool.runInteraction( + "get_latest_profile_replication_batch_number", f + ) + + async def get_profile_batch(self, batchnum): + return await self.db_pool.simple_select_list( + table="profiles", + keyvalues={"batch": batchnum}, + retcols=("user_id", "displayname", "avatar_url", "active"), + desc="get_profile_batch", + ) + + async def assign_profile_batch(self): + def f(txn): + sql = ( + "UPDATE profiles SET batch = " + "(SELECT COALESCE(MAX(batch), -1) + 1 FROM profiles) " + "WHERE user_id in (" + " SELECT user_id FROM profiles WHERE batch is NULL limit ?" 
+ ")" + ) + txn.execute(sql, (BATCH_SIZE,)) + return txn.rowcount + + return await self.db_pool.runInteraction("assign_profile_batch", f) + + async def get_replication_hosts(self): + def f(txn): + txn.execute( + "SELECT host, last_synced_batch FROM profile_replication_status" + ) + rows = self.db_pool.cursor_to_dict(txn) + return {r["host"]: r["last_synced_batch"] for r in rows} + + return await self.db_pool.runInteraction("get_replication_hosts", f) + + async def update_replication_batch_for_host( + self, host: str, last_synced_batch: int + ): + return await self.db_pool.simple_upsert( + table="profile_replication_status", + keyvalues={"host": host}, + values={"last_synced_batch": last_synced_batch}, + desc="update_replication_batch_for_host", + ) + async def get_from_remote_profile_cache( self, user_id: str ) -> Optional[Dict[str, Any]]: @@ -72,27 +131,82 @@ class ProfileWorkerStore(SQLBaseStore): ) async def set_profile_displayname( - self, user_localpart: str, new_displayname: str + self, user_localpart: str, new_displayname: str, batchnum: int ) -> None: - await self.db_pool.simple_update_one( + # Invalidate the read cache for this user + self.get_profile_displayname.invalidate((user_localpart,)) + + await self.db_pool.simple_upsert( table="profiles", keyvalues={"user_id": user_localpart}, - updatevalues={"displayname": new_displayname}, + values={"displayname": new_displayname, "batch": batchnum}, desc="set_profile_displayname", + lock=False, # we can do this because user_id has a unique index ) async def set_profile_avatar_url( - self, user_localpart: str, new_avatar_url: str + self, user_localpart: str, new_avatar_url: str, batchnum: int ) -> None: - await self.db_pool.simple_update_one( + # Invalidate the read cache for this user + self.get_profile_avatar_url.invalidate((user_localpart,)) + + await self.db_pool.simple_upsert( table="profiles", keyvalues={"user_id": user_localpart}, - updatevalues={"avatar_url": new_avatar_url}, + values={"avatar_url": new_avatar_url, "batch": batchnum}, desc="set_profile_avatar_url", + lock=False, # we can do this because user_id has a unique index + ) + + async def set_profiles_active( + self, users: List[UserID], active: bool, hide: bool, batchnum: int, + ) -> None: + """Given a set of users, set active and hidden flags on them. + + Args: + users: A list of UserIDs + active: Whether to set the users to active or inactive + hide: Whether to hide the users (withold from replication). 
If + False and active is False, users will have their profiles + erased + batchnum: The batch number, used for profile replication + """ + # Convert list of localparts to list of tuples containing localparts + user_localparts = [(user.localpart,) for user in users] + + # Generate list of value tuples for each user + value_names = ("active", "batch") + values = [(int(active), batchnum) for _ in user_localparts] # type: List[Tuple] + + if not active and not hide: + # we are deactivating for real (not in hide mode) + # so clear the profile information + value_names += ("avatar_url", "displayname") + values = [v + (None, None) for v in values] + + return await self.db_pool.runInteraction( + "set_profiles_active", + self.db_pool.simple_upsert_many_txn, + table="profiles", + key_names=("user_id",), + key_values=user_localparts, + value_names=value_names, + value_values=values, ) class ProfileStore(ProfileWorkerStore): + def __init__(self, database, db_conn, hs): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_index_update( + "profile_replication_status_host_index", + index_name="profile_replication_status_idx", + table="profile_replication_status", + columns=["host"], + unique=True, + ) + async def add_remote_profile_cache( self, user_id: str, displayname: str, avatar_url: str ) -> None: @@ -115,10 +229,10 @@ class ProfileStore(ProfileWorkerStore): async def update_remote_profile_cache( self, user_id: str, displayname: str, avatar_url: str ) -> int: - return await self.db_pool.simple_update( + return await self.db_pool.simple_upsert( table="remote_profile_cache", keyvalues={"user_id": user_id}, - updatevalues={ + values={ "displayname": displayname, "avatar_url": avatar_url, "last_check": self._clock.time_msec(), diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
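The profile.py changes above stamp every profile write with a batch number and record, per replication target, the last batch already pushed. A standalone sketch of how a replication loop might use those two pieces; the hosts and profiles are invented and this is not the real handler code:

    profiles = {
        "alice": {"displayname": "Alice", "batch": 0},
        "bob": {"displayname": "Bob", "batch": 1},
    }
    replication_status = {"idserver.example": 0}  # host -> last_synced_batch

    def pending_batches(host):
        latest = max(p["batch"] for p in profiles.values())
        return range(replication_status[host] + 1, latest + 1)

    for batch in pending_batches("idserver.example"):
        to_push = [(user, p) for user, p in profiles.items() if p["batch"] == batch]
        # ... push `to_push` to the host, then record progress, mirroring
        # update_replication_batch_for_host() above.
        replication_status["idserver.example"] = batch

    print(replication_status)  # {'idserver.example': 1}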
index ea833829ae..d7a03cbf7d 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -69,6 +69,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore): # room_depth # state_groups # state_groups_state + # destination_rooms # we will build a temporary table listing the events so that we don't # have to keep shovelling the list back and forth across the @@ -336,6 +337,7 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore): # and finally, the tables with an index on room_id (or no useful index) for table in ( "current_state_events", + "destination_rooms", "event_backward_extremities", "event_forward_extremities", "event_json", diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py
index 0de802a86b..9790a31998 100644
--- a/synapse/storage/databases/main/push_rule.py
+++ b/synapse/storage/databases/main/push_rule.py
@@ -13,11 +13,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import abc import logging from typing import List, Tuple, Union +from synapse.api.errors import NotFoundError, StoreError from synapse.push.baserules import list_with_base_rules from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.storage._base import SQLBaseStore, db_to_json @@ -27,6 +27,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.pusher import PusherWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.roommember import RoomMemberWorkerStore +from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException from synapse.storage.util.id_generators import StreamIdGenerator from synapse.util import json_encoder @@ -540,6 +541,25 @@ class PushRuleStore(PushRulesWorkerStore): }, ) + # ensure we have a push_rules_enable row + # enabledness defaults to true + if isinstance(self.database_engine, PostgresEngine): + sql = """ + INSERT INTO push_rules_enable (id, user_name, rule_id, enabled) + VALUES (?, ?, ?, ?) + ON CONFLICT DO NOTHING + """ + elif isinstance(self.database_engine, Sqlite3Engine): + sql = """ + INSERT OR IGNORE INTO push_rules_enable (id, user_name, rule_id, enabled) + VALUES (?, ?, ?, ?) + """ + else: + raise RuntimeError("Unknown database engine") + + new_enable_id = self._push_rules_enable_id_gen.get_next() + txn.execute(sql, (new_enable_id, user_id, rule_id, 1)) + async def delete_push_rule(self, user_id: str, rule_id: str) -> None: """ Delete a push rule. Args specify the row to be deleted and can be @@ -552,6 +572,12 @@ class PushRuleStore(PushRulesWorkerStore): """ def delete_push_rule_txn(txn, stream_id, event_stream_ordering): + # we don't use simple_delete_one_txn because that would fail if the + # user did not have a push_rule_enable row. + self.db_pool.simple_delete_txn( + txn, "push_rules_enable", {"user_name": user_id, "rule_id": rule_id} + ) + self.db_pool.simple_delete_one_txn( txn, "push_rules", {"user_name": user_id, "rule_id": rule_id} ) @@ -570,10 +596,29 @@ class PushRuleStore(PushRulesWorkerStore): event_stream_ordering, ) - async def set_push_rule_enabled(self, user_id, rule_id, enabled) -> None: + async def set_push_rule_enabled( + self, user_id: str, rule_id: str, enabled: bool, is_default_rule: bool + ) -> None: + """ + Sets the `enabled` state of a push rule. + + Args: + user_id: the user ID of the user who wishes to enable/disable the rule + e.g. '@tina:example.org' + rule_id: the full rule ID of the rule to be enabled/disabled + e.g. 'global/override/.m.rule.roomnotif' + or 'global/override/myCustomRule' + enabled: True if the rule is to be enabled, False if it is to be + disabled + is_default_rule: True if and only if this is a server-default rule. + This skips the check for existence (as only user-created rules + are always stored in the database `push_rules` table). + + Raises: + NotFoundError if the rule does not exist. 
+ """ with await self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() - await self.db_pool.runInteraction( "_set_push_rule_enabled_txn", self._set_push_rule_enabled_txn, @@ -582,12 +627,47 @@ class PushRuleStore(PushRulesWorkerStore): user_id, rule_id, enabled, + is_default_rule, ) def _set_push_rule_enabled_txn( - self, txn, stream_id, event_stream_ordering, user_id, rule_id, enabled + self, + txn, + stream_id, + event_stream_ordering, + user_id, + rule_id, + enabled, + is_default_rule, ): new_id = self._push_rules_enable_id_gen.get_next() + + if not is_default_rule: + # first check it exists; we need to lock for key share so that a + # transaction that deletes the push rule will conflict with this one. + # We also need a push_rule_enable row to exist for every push_rules + # row, otherwise it is possible to simultaneously delete a push rule + # (that has no _enable row) and enable it, resulting in a dangling + # _enable row. To solve this: we either need to use SERIALISABLE or + # ensure we always have a push_rule_enable row for every push_rule + # row. We chose the latter. + for_key_share = "FOR KEY SHARE" + if not isinstance(self.database_engine, PostgresEngine): + # For key share is not applicable/available on SQLite + for_key_share = "" + sql = ( + """ + SELECT 1 FROM push_rules + WHERE user_name = ? AND rule_id = ? + %s + """ + % for_key_share + ) + txn.execute(sql, (user_id, rule_id)) + if txn.fetchone() is None: + # needed to set NOT_FOUND code. + raise NotFoundError("Push rule does not exist.") + self.db_pool.simple_upsert_txn( txn, "push_rules_enable", @@ -606,8 +686,30 @@ class PushRuleStore(PushRulesWorkerStore): ) async def set_push_rule_actions( - self, user_id, rule_id, actions, is_default_rule + self, + user_id: str, + rule_id: str, + actions: List[Union[dict, str]], + is_default_rule: bool, ) -> None: + """ + Sets the `actions` state of a push rule. + + Will throw NotFoundError if the rule does not exist; the Code for this + is NOT_FOUND. + + Args: + user_id: the user ID of the user who wishes to enable/disable the rule + e.g. '@tina:example.org' + rule_id: the full rule ID of the rule to be enabled/disabled + e.g. 'global/override/.m.rule.roomnotif' + or 'global/override/myCustomRule' + actions: A list of actions (each action being a dict or string), + e.g. ["notify", {"set_tweak": "highlight", "value": false}] + is_default_rule: True if and only if this is a server-default rule. + This skips the check for existence (as only user-created rules + are always stored in the database `push_rules` table). + """ actions_json = json_encoder.encode(actions) def set_push_rule_actions_txn(txn, stream_id, event_stream_ordering): @@ -629,12 +731,19 @@ class PushRuleStore(PushRulesWorkerStore): update_stream=False, ) else: - self.db_pool.simple_update_one_txn( - txn, - "push_rules", - {"user_name": user_id, "rule_id": rule_id}, - {"actions": actions_json}, - ) + try: + self.db_pool.simple_update_one_txn( + txn, + "push_rules", + {"user_name": user_id, "rule_id": rule_id}, + {"actions": actions_json}, + ) + except StoreError as serr: + if serr.code == 404: + # this sets the NOT_FOUND error Code + raise NotFoundError("Push rule does not exist") + else: + raise self._insert_push_rules_update_txn( txn, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 01f20c03c2..fcecb443d2 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -156,6 +156,37 @@ class RegistrationWorkerStore(SQLBaseStore): "set_account_validity_for_user", set_account_validity_for_user_txn ) + async def get_expired_users(self): + """Get UserIDs of all expired users. + + Users who are not active, or do not have profile information, are + excluded from the results. + + Returns: + Deferred[List[UserID]]: List of expired user IDs + """ + + def get_expired_users_txn(txn, now_ms): + # We need to use pattern matching as profiles.user_id is confusingly just the + # user's localpart, whereas account_validity.user_id is a full user ID + sql = """ + SELECT av.user_id from account_validity AS av + LEFT JOIN profiles as p + ON av.user_id LIKE '%%' || p.user_id || ':%%' + WHERE expiration_ts_ms <= ? + AND p.active = 1 + """ + txn.execute(sql, (now_ms,)) + rows = txn.fetchall() + + return [UserID.from_string(row[0]) for row in rows] + + res = await self.db_pool.runInteraction( + "get_expired_users", get_expired_users_txn, self.clock.time_msec() + ) + + return res + async def set_renewal_token_for_user( self, user_id: str, renewal_token: str ) -> None: @@ -262,6 +293,54 @@ class RegistrationWorkerStore(SQLBaseStore): desc="delete_account_validity_for_user", ) + async def get_info_for_users( + self, user_ids: List[str], + ): + """Return the user info for a given set of users + + Args: + user_ids: A list of users to return information about + + Returns: + Deferred[Dict[str, bool]]: A dictionary mapping each user ID to + a dict with the following keys: + * expired - whether this is an expired user + * deactivated - whether this is a deactivated user + """ + # Get information of all our local users + def _get_info_for_users_txn(txn): + rows = [] + + for user_id in user_ids: + sql = """ + SELECT u.name, u.deactivated, av.expiration_ts_ms + FROM users as u + LEFT JOIN account_validity as av + ON av.user_id = u.name + WHERE u.name = ? + """ + + txn.execute(sql, (user_id,)) + row = txn.fetchone() + if row: + rows.append(row) + + return rows + + info_rows = await self.db_pool.runInteraction( + "get_info_for_users", _get_info_for_users_txn + ) + + return { + user_id: { + "expired": ( + expiration is not None and self.clock.time_msec() >= expiration + ), + "deactivated": deactivated == 1, + } + for user_id, deactivated, expiration in info_rows + } + async def is_server_admin(self, user: UserID) -> bool: """Determines if a user is an admin of this homeserver. diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
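get_info_for_users above reduces each user to an "expired"/"deactivated" pair. A standalone sketch of how the fetched rows are folded into that response; the timestamps and user IDs are illustrative:

    import time

    now_ms = int(time.time() * 1000)
    rows = [
        ("@alice:example.org", 0, None),         # no account_validity row
        ("@bob:example.org", 1, now_ms - 1000),  # deactivated, validity expired
    ]
    info = {
        user_id: {
            "expired": expiration is not None and now_ms >= expiration,
            "deactivated": deactivated == 1,
        }
        for user_id, deactivated, expiration in rows
    }
    print(info)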
index 717df97301..d5dc7a36bb 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -343,6 +343,23 @@ class RoomWorkerStore(SQLBaseStore): desc="is_room_blocked", ) + async def is_room_published(self, room_id: str) -> bool: + """Check whether a room has been published in the local public room + directory. + + Args: + room_id + Returns: + Whether the room is currently published in the room directory + """ + # Get room information + room_info = await self.get_room(room_id) + if not room_info: + return False + + # Check the is_public value + return room_info.get("is_public", False) + async def get_rooms_paginate( self, start: int, @@ -551,6 +568,11 @@ class RoomWorkerStore(SQLBaseStore): Returns: dict[int, int]: "min_lifetime" and "max_lifetime" for this room. """ + # If the room retention feature is disabled, return a policy with no minimum nor + # maximum, in order not to filter out events we should filter out when sending to + # the client. + if not self.config.retention_enabled: + return {"min_lifetime": None, "max_lifetime": None} def get_retention_policy_for_room_txn(txn): txn.execute( diff --git a/synapse/storage/databases/main/schema/delta/48/profiles_batch.sql b/synapse/storage/databases/main/schema/delta/48/profiles_batch.sql new file mode 100644
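get_retention_policy_for_room above now short-circuits when the retention feature is disabled, returning an unbounded policy so no events are filtered out. A tiny sketch of that behaviour with a stand-in flag instead of hs.config.retention_enabled:

    retention_enabled = False  # stand-in for hs.config.retention_enabled

    def get_retention_policy_for_room(room_id):
        if not retention_enabled:
            # Unbounded policy: nothing gets filtered when sending to clients.
            return {"min_lifetime": None, "max_lifetime": None}
        # ... otherwise fall through to the per-room lookup as before
        raise NotImplementedError("room state lookup not sketched here")

    print(get_retention_policy_for_room("!room:example.org"))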
index 0000000000..e744c02fe8
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/48/profiles_batch.sql
@@ -0,0 +1,36 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Add a batch number to track changes to profiles and the + * order they're made in so we can replicate user profiles + * to other hosts as they change + */ +ALTER TABLE profiles ADD COLUMN batch BIGINT DEFAULT NULL; + +/* + * Index on the batch number so we can get profiles + * by their batch + */ +CREATE INDEX profiles_batch_idx ON profiles(batch); + +/* + * A table to track what batch of user profiles has been + * synced to what profile replication target. + */ +CREATE TABLE profile_replication_status ( + host TEXT NOT NULL, + last_synced_batch BIGINT NOT NULL +); diff --git a/synapse/storage/databases/main/schema/delta/50/profiles_deactivated_users.sql b/synapse/storage/databases/main/schema/delta/50/profiles_deactivated_users.sql new file mode 100644
index 0000000000..96051ac179
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/50/profiles_deactivated_users.sql
@@ -0,0 +1,23 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * A flag saying whether the user owning the profile has been deactivated + * This really belongs on the users table, not here, but the users table + * stores users by their full user_id and profiles stores them by localpart, + * so we can't easily join between the two tables. Plus, the batch number + * realy ought to represent data in this table that has changed. + */ +ALTER TABLE profiles ADD COLUMN active SMALLINT DEFAULT 1 NOT NULL; \ No newline at end of file diff --git a/synapse/storage/databases/main/schema/delta/55/profile_replication_status_index.sql b/synapse/storage/databases/main/schema/delta/55/profile_replication_status_index.sql new file mode 100644
index 0000000000..7542ab8cbd
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/55/profile_replication_status_index.sql
@@ -0,0 +1,16 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE UNIQUE INDEX profile_replication_status_idx ON profile_replication_status(host); \ No newline at end of file diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres new file mode 100644
index 0000000000..b64926e9c9
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
@@ -0,0 +1,33 @@ +/* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This adds the method to the unique key constraint of the thumbnail databases. + * Otherwise you can't have a scaled and a cropped thumbnail with the same + * resolution, which happens quite often with dynamic thumbnailing. + * This is the postgres specific migration modifying the table with a background + * migration. + */ + +-- add new index that includes method to local media +INSERT INTO background_updates (update_name, progress_json) VALUES + ('local_media_repository_thumbnails_method_idx', '{}'); + +-- add new index that includes method to remote media +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('remote_media_repository_thumbnails_method_idx', '{}', 'local_media_repository_thumbnails_method_idx'); + +-- drop old index +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('media_repository_drop_index_wo_method', '{}', 'remote_media_repository_thumbnails_method_idx'); + diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite new file mode 100644
index 0000000000..1d0c04b53a
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
@@ -0,0 +1,44 @@ +/* Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This adds the method to the unique key constraint of the thumbnail databases. + * Otherwise you can't have a scaled and a cropped thumbnail with the same + * resolution, which happens quite often with dynamic thumbnailing. + * This is a sqlite specific migration, since sqlite can't modify the unique + * constraint of a table without recreating it. + */ + +CREATE TABLE local_media_repository_thumbnails_new ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) ); + +INSERT INTO local_media_repository_thumbnails_new + SELECT media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method, thumbnail_length + FROM local_media_repository_thumbnails; + +DROP TABLE local_media_repository_thumbnails; + +ALTER TABLE local_media_repository_thumbnails_new RENAME TO local_media_repository_thumbnails; + +CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id); + + + +CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails_new ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type, thumbnail_method ) ); + +INSERT INTO remote_media_cache_thumbnails_new + SELECT media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_method, thumbnail_type, thumbnail_length, filesystem_id + FROM remote_media_cache_thumbnails; + +DROP TABLE remote_media_cache_thumbnails; + +ALTER TABLE remote_media_cache_thumbnails_new RENAME TO remote_media_cache_thumbnails; diff --git a/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql b/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql new file mode 100644
index 0000000000..847aebd85e
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql
@@ -0,0 +1,28 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + Delete stuck 'enabled' bits that correspond to deleted or non-existent push rules. + We ignore rules that are server-default rules because they are not defined + in the `push_rules` table. +**/ + +DELETE FROM push_rules_enable WHERE + rule_id NOT LIKE 'global/%/.m.rule.%' + AND NOT EXISTS ( + SELECT 1 FROM push_rules + WHERE push_rules.user_name = push_rules_enable.user_name + AND push_rules.rule_id = push_rules_enable.rule_id + ); diff --git a/synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql b/synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql new file mode 100644
index 0000000000..ebfbed7925
--- /dev/null
+++ b/synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql
@@ -0,0 +1,42 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +-- This schema delta alters the schema to enable 'catching up' remote homeservers +-- after there has been a connectivity problem for any reason. + +-- This stores, for each (destination, room) pair, the stream_ordering of the +-- latest event for that destination. +CREATE TABLE IF NOT EXISTS destination_rooms ( + -- the destination in question. + destination TEXT NOT NULL REFERENCES destinations (destination), + -- the ID of the room in question + room_id TEXT NOT NULL REFERENCES rooms (room_id), + -- the stream_ordering of the event + stream_ordering BIGINT NOT NULL, + PRIMARY KEY (destination, room_id) + -- We don't declare a foreign key on stream_ordering here because that'd mean + -- we'd need to either maintain an index (expensive) or do a table scan of + -- destination_rooms whenever we delete an event (also potentially expensive). + -- In addition to that, a foreign key on stream_ordering would be redundant + -- as this row doesn't need to refer to a specific event; if the event gets + -- deleted then it doesn't affect the validity of the stream_ordering here. +); + +-- This index is needed to make it so that a deletion of a room (in the rooms +-- table) can be efficient, as otherwise a table scan would need to be performed +-- to check that no destination_rooms rows point to the room to be deleted. +-- Also: it makes it efficient to delete all the entries for a given room ID, +-- such as when purging a room. +CREATE INDEX IF NOT EXISTS destination_rooms_room_id + ON destination_rooms (room_id); diff --git a/synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql b/synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql new file mode 100644
index 0000000000..55f5d0f732 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql
@@ -0,0 +1,22 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +-- This delta file fixes a regression introduced by 58/12room_stats.sql, removing the hacky +-- populate_stats_process_rooms_2 background job and restoring the functionality under the +-- original name. +-- See https://github.com/matrix-org/synapse/issues/8238 for details + +DELETE FROM background_updates WHERE update_name = 'populate_stats_process_rooms'; +UPDATE background_updates SET update_name = 'populate_stats_process_rooms' + WHERE update_name = 'populate_stats_process_rooms_2'; diff --git a/synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql b/synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql new file mode 100644
index 0000000000..a67aa5e500 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql
@@ -0,0 +1,21 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This column tracks the stream_ordering of the event that was most recently +-- successfully transmitted to the destination. +-- A value of NULL means that we have not sent an event successfully yet +-- (at least, not since the introduction of this column). +ALTER TABLE destinations + ADD COLUMN last_successful_stream_ordering BIGINT; diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres
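Taken together, the `destination_rooms` table and the new `destinations.last_successful_stream_ordering` column give the federation sender enough information to work out which rooms a previously unreachable destination has missed. The sender-side logic is not part of these schema deltas, so the following is only an illustrative sketch of the kind of query a catch-up loop might run (the function name, the `txn` cursor and the 50-row limit are assumptions, not code from this change):

    def _get_catchup_room_ids_txn(txn, destination, last_successful_stream_ordering, limit=50):
        # Illustrative only: pick the rooms whose newest event is ahead of the
        # last stream_ordering we know this destination has received.
        txn.execute(
            """
            SELECT room_id, stream_ordering FROM destination_rooms
            WHERE destination = ? AND stream_ordering > ?
            ORDER BY stream_ordering
            LIMIT ?
            """,
            (destination, last_successful_stream_ordering, limit),
        )
        return txn.fetchall()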
index 889a9a0ce4..20c5af2eb7 100644 --- a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres +++ b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres
@@ -658,10 +658,19 @@ CREATE TABLE presence_stream ( +CREATE TABLE profile_replication_status ( + host text NOT NULL, + last_synced_batch bigint NOT NULL +); + + + CREATE TABLE profiles ( user_id text NOT NULL, displayname text, - avatar_url text + avatar_url text, + batch bigint, + active smallint DEFAULT 1 NOT NULL ); @@ -1788,6 +1797,10 @@ CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id); +CREATE INDEX profiles_batch_idx ON profiles USING btree (batch); + + + CREATE INDEX public_room_index ON rooms USING btree (is_public); diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite
index a0411ede7e..e28ec3fa45 100644 --- a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite +++ b/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite
@@ -6,7 +6,7 @@ CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_us CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) ); CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) ); CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL ); -CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) ); +CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, batch BIGINT DEFAULT NULL, active SMALLINT DEFAULT 1 NOT NULL, UNIQUE(user_id) ); CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) ); CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER ); CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) ); @@ -202,6 +202,8 @@ CREATE INDEX group_users_u_idx ON group_users(user_id); CREATE INDEX group_invites_u_idx ON group_invites(user_id); CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id); CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); +CREATE INDEX profiles_batch_idx ON profiles(batch); +CREATE TABLE profile_replication_status ( host TEXT NOT NULL, last_synced_batch BIGINT NOT NULL ); CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL ); CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp); CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp); diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index 55a250ef06..30840dbbaa 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py
@@ -74,9 +74,6 @@ class StatsStore(StateDeltasStore): "populate_stats_process_rooms", self._populate_stats_process_rooms ) self.db_pool.updates.register_background_update_handler( - "populate_stats_process_rooms_2", self._populate_stats_process_rooms_2 - ) - self.db_pool.updates.register_background_update_handler( "populate_stats_process_users", self._populate_stats_process_users ) # we no longer need to perform clean-up, but we will give ourselves @@ -148,31 +145,10 @@ class StatsStore(StateDeltasStore): return len(users_to_work_on) async def _populate_stats_process_rooms(self, progress, batch_size): - """ - This was a background update which regenerated statistics for rooms. - - It has been replaced by StatsStore._populate_stats_process_rooms_2. This background - job has been scheduled to run as part of Synapse v1.0.0, and again now. To ensure - someone upgrading from <v1.0.0, this background task has been turned into a no-op - so that the potentially expensive task is not run twice. - - Further context: https://github.com/matrix-org/synapse/pull/7977 - """ - await self.db_pool.updates._end_background_update( - "populate_stats_process_rooms" - ) - return 1 - - async def _populate_stats_process_rooms_2(self, progress, batch_size): - """ - This is a background update which regenerates statistics for rooms. - - It replaces StatsStore._populate_stats_process_rooms. See its docstring for the - reasoning. - """ + """This is a background update which regenerates statistics for rooms.""" if not self.stats_enabled: await self.db_pool.updates._end_background_update( - "populate_stats_process_rooms_2" + "populate_stats_process_rooms" ) return 1 @@ -189,13 +165,13 @@ class StatsStore(StateDeltasStore): return [r for r, in txn] rooms_to_work_on = await self.db_pool.runInteraction( - "populate_stats_rooms_2_get_batch", _get_next_batch + "populate_stats_rooms_get_batch", _get_next_batch ) # No more rooms -- complete the transaction. if not rooms_to_work_on: await self.db_pool.updates._end_background_update( - "populate_stats_process_rooms_2" + "populate_stats_process_rooms" ) return 1 @@ -204,9 +180,9 @@ class StatsStore(StateDeltasStore): progress["last_room_id"] = room_id await self.db_pool.runInteraction( - "_populate_stats_process_rooms_2", + "_populate_stats_process_rooms", self.db_pool.updates._background_update_progress_txn, - "populate_stats_process_rooms_2", + "populate_stats_process_rooms", progress, ) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index be6df8a6d1..08a13a8b47 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py
@@ -79,8 +79,8 @@ _EventDictReturn = namedtuple( def generate_pagination_where_clause( direction: str, column_names: Tuple[str, str], - from_token: Optional[Tuple[int, int]], - to_token: Optional[Tuple[int, int]], + from_token: Optional[Tuple[Optional[int], int]], + to_token: Optional[Tuple[Optional[int], int]], engine: BaseDatabaseEngine, ) -> str: """Creates an SQL expression to bound the columns by the pagination @@ -535,13 +535,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if limit == 0: return [], end_token - end_token = RoomStreamToken.parse(end_token) + parsed_end_token = RoomStreamToken.parse(end_token) rows, token = await self.db_pool.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_txn, room_id, - from_token=end_token, + from_token=parsed_end_token, limit=limit, ) @@ -989,8 +989,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): bounds = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), - from_token=from_token, - to_token=to_token, + from_token=from_token.as_tuple(), + to_token=to_token.as_tuple() if to_token else None, engine=self.database_engine, ) @@ -1083,16 +1083,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and `to_key`). """ - from_key = RoomStreamToken.parse(from_key) + parsed_from_key = RoomStreamToken.parse(from_key) + parsed_to_key = None if to_key: - to_key = RoomStreamToken.parse(to_key) + parsed_to_key = RoomStreamToken.parse(to_key) rows, token = await self.db_pool.runInteraction( "paginate_room_events", self._paginate_room_events_txn, room_id, - from_key, - to_key, + parsed_from_key, + parsed_to_key, direction, limit, event_filter, diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 5b31aab700..c0a958252e 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py
@@ -15,13 +15,14 @@ import logging from collections import namedtuple -from typing import Optional, Tuple +from typing import Iterable, Optional, Tuple from canonicaljson import encode_canonical_json from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import DatabasePool +from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import JsonDict from synapse.util.caches.expiringcache import ExpiringCache @@ -164,7 +165,9 @@ class TransactionStore(SQLBaseStore): allow_none=True, ) - if result and result["retry_last_ts"] > 0: + # check we have a row and retry_last_ts is not null or zero + # (retry_last_ts can't be negative) + if result and result["retry_last_ts"]: return result else: return None @@ -273,3 +276,98 @@ class TransactionStore(SQLBaseStore): await self.db_pool.runInteraction( "_cleanup_transactions", _cleanup_transactions_txn ) + + async def store_destination_rooms_entries( + self, destinations: Iterable[str], room_id: str, stream_ordering: int, + ) -> None: + """ + Updates or creates `destination_rooms` entries in batch for a single event. + + Args: + destinations: list of destinations + room_id: the room_id of the event + stream_ordering: the stream_ordering of the event + """ + + return await self.db_pool.runInteraction( + "store_destination_rooms_entries", + self._store_destination_rooms_entries_txn, + destinations, + room_id, + stream_ordering, + ) + + def _store_destination_rooms_entries_txn( + self, + txn: LoggingTransaction, + destinations: Iterable[str], + room_id: str, + stream_ordering: int, + ) -> None: + + # ensure we have a `destinations` row for this destination, as there is + # a foreign key constraint. + if isinstance(self.database_engine, PostgresEngine): + q = """ + INSERT INTO destinations (destination) + VALUES (?) + ON CONFLICT DO NOTHING; + """ + elif isinstance(self.database_engine, Sqlite3Engine): + q = """ + INSERT OR IGNORE INTO destinations (destination) + VALUES (?); + """ + else: + raise RuntimeError("Unknown database engine") + + txn.execute_batch(q, ((destination,) for destination in destinations)) + + rows = [(destination, room_id) for destination in destinations] + + self.db_pool.simple_upsert_many_txn( + txn, + "destination_rooms", + ["destination", "room_id"], + rows, + ["stream_ordering"], + [(stream_ordering,)] * len(rows), + ) + + async def get_destination_last_successful_stream_ordering( + self, destination: str + ) -> Optional[int]: + """ + Gets the stream ordering of the PDU most-recently successfully sent + to the specified destination, or None if this information has not been + tracked yet. + + Args: + destination: the destination to query + """ + return await self.db_pool.simple_select_one_onecol( + "destinations", + {"destination": destination}, + "last_successful_stream_ordering", + allow_none=True, + desc="get_last_successful_stream_ordering", + ) + + async def set_destination_last_successful_stream_ordering( + self, destination: str, last_successful_stream_ordering: int + ) -> None: + """ + Marks that we have successfully sent the PDUs up to and including the + one specified. 
+ + Args: + destination: the destination we have successfully sent to + last_successful_stream_ordering: the stream_ordering of the most + recent successfully-sent PDU + """ + return await self.db_pool.simple_upsert( + "destinations", + keyvalues={"destination": destination}, + values={"last_successful_stream_ordering": last_successful_stream_ordering}, + desc="set_last_successful_stream_ordering", + ) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
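The new transaction-store helpers above are meant to be driven from the federation sender, which this hunk does not touch, so the glue below is only a hedged sketch of how they might be called (the `store`, `event` and `destinations` names are assumptions): `store_destination_rooms_entries` is invoked whenever an event is queued for a set of destinations, and the high-water mark is only advanced once the remote server has acknowledged a transaction.

    async def on_event_queued(store, event, destinations):
        # Record the newest stream_ordering for each (destination, room) pair.
        await store.store_destination_rooms_entries(
            destinations, event.room_id, event.internal_metadata.stream_ordering
        )

    async def on_transaction_acknowledged(store, destination, max_stream_ordering):
        # Only move the mark forwards once the remote has accepted the PDUs.
        last = await store.get_destination_last_successful_stream_ordering(destination)
        if last is None or max_stream_ordering > last:
            await store.set_destination_last_successful_stream_ordering(
                destination, max_stream_ordering
            )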
index ee60e2a718..a7f2dfb850 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py
@@ -19,12 +19,15 @@ import logging import os import re from collections import Counter -from typing import TextIO +from typing import Optional, TextIO import attr +from synapse.config.homeserver import HomeServerConfig +from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine -from synapse.storage.types import Cursor +from synapse.storage.types import Connection, Cursor +from synapse.types import Collection logger = logging.getLogger(__name__) @@ -63,7 +66,12 @@ UNAPPLIED_DELTA_ON_WORKER_ERROR = ( ) -def prepare_database(db_conn, database_engine, config, databases=["main", "state"]): +def prepare_database( + db_conn: Connection, + database_engine: BaseDatabaseEngine, + config: Optional[HomeServerConfig], + databases: Collection[str] = ["main", "state"], +): """Prepares a physical database for usage. Will either create all necessary tables or upgrade from an older schema version. @@ -73,16 +81,24 @@ def prepare_database(db_conn, database_engine, config, databases=["main", "state Args: db_conn: database_engine: - config (synapse.config.homeserver.HomeServerConfig|None): + config : application config, or None if we are connecting to an existing database which we expect to be configured already - databases (list[str]): The name of the databases that will be used + databases: The name of the databases that will be used with this physical database. Defaults to all databases. """ try: cur = db_conn.cursor() + # sqlite does not automatically start transactions for DDL / SELECT statements, + # so we start one before running anything. This ensures that any upgrades + # are either applied completely, or not at all. + # + # (psycopg2 automatically starts a transaction as soon as we run any statements + # at all, so this is redundant but harmless there.) + cur.execute("BEGIN TRANSACTION") + logger.info("%r: Checking existing schema version", databases) version_info = _get_or_create_schema_state(cur, database_engine) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
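The explicit `BEGIN TRANSACTION` matters because the sqlite3 driver, in its default isolation mode, only opens implicit transactions for data-modifying statements, not for DDL or SELECT, so a failing delta could otherwise leave the schema half upgraded. A minimal sketch of the intended all-or-nothing pattern (simplified, not the real `prepare_database` body):

    def apply_deltas_atomically(db_conn, statements):
        # Apply a list of schema statements so that they either all take
        # effect or none of them do.
        cur = db_conn.cursor()
        cur.execute("BEGIN TRANSACTION")
        try:
            for statement in statements:
                cur.execute(statement)
        except Exception:
            db_conn.rollback()
            raise
        db_conn.commit()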
index b7eb4f8ac9..2a66b3ad4e 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py
@@ -224,6 +224,10 @@ class MultiWriterIdGenerator: # should be less than the minimum of this set (if not empty). self._unfinished_ids = set() # type: Set[int] + # Set of local IDs that we've processed that are larger than the current + # position, due to there being smaller unpersisted IDs. + self._finished_ids = set() # type: Set[int] + # We track the max position where we know everything before has been # persisted. This is done by a) looking at the min across all instances # and b) noting that if we have seen a run of persisted positions @@ -348,17 +352,44 @@ class MultiWriterIdGenerator: def _mark_id_as_finished(self, next_id: int): """The ID has finished being processed so we should advance the - current poistion if possible. + current position if possible. """ with self._lock: self._unfinished_ids.discard(next_id) + self._finished_ids.add(next_id) + + new_cur = None + + if self._unfinished_ids: + # If there are unfinished IDs then the new position will be the + # largest finished ID less than the minimum unfinished ID. + + finished = set() + + min_unfinshed = min(self._unfinished_ids) + for s in self._finished_ids: + if s < min_unfinshed: + if new_cur is None or new_cur < s: + new_cur = s + else: + finished.add(s) + + # We clear these out since they're now all less than the new + # position. + self._finished_ids = finished + else: + # There are no unfinished IDs so the new position is simply the + # largest finished one. + new_cur = max(self._finished_ids) + + # We clear these out since they're now all less than the new + # position. + self._finished_ids.clear() - # Figure out if its safe to advance the position by checking there - # aren't any lower allocated IDs that are yet to finish. - if all(c > next_id for c in self._unfinished_ids): + if new_cur: curr = self._current_positions.get(self._instance_name, 0) - self._current_positions[self._instance_name] = max(curr, next_id) + self._current_positions[self._instance_name] = max(curr, new_cur) self._add_persisted_position(next_id) @@ -428,7 +459,7 @@ class MultiWriterIdGenerator: # We move the current min position up if the minimum current positions # of all instances is higher (since by definition all positions less # that that have been persisted). - min_curr = min(self._current_positions.values()) + min_curr = min(self._current_positions.values(), default=0) self._persisted_upto_position = max(min_curr, self._persisted_upto_position) # We now iterate through the seen positions, discarding those that are diff --git a/synapse/streams/config.py b/synapse/streams/config.py
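The reworked `_mark_id_as_finished` advances the current position to the largest finished ID that is still below the smallest unfinished ID, instead of only advancing when the just-finished ID happens to be the lowest outstanding one. A standalone sketch of the same calculation, ignoring locking and persisted positions, may make the intent clearer:

    def advance_position(current, finished_ids, unfinished_ids):
        # Return the new position given sets of finished and unfinished IDs.
        #
        # E.g. with unfinished={5, 7} and finished={1, 2, 3, 4, 6} the position
        # becomes 4; ID 6 has to wait in the finished set until 5 completes.
        if unfinished_ids:
            min_unfinished = min(unfinished_ids)
            below = {i for i in finished_ids if i < min_unfinished}
            new_cur = max(below) if below else None
        else:
            new_cur = max(finished_ids) if finished_ids else None
        return max(current, new_cur) if new_cur is not None else current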
index d97dc4d101..0bdf846edf 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py
@@ -14,9 +14,13 @@ # limitations under the License. import logging +from typing import Optional + +import attr from synapse.api.errors import SynapseError from synapse.http.servlet import parse_integer, parse_string +from synapse.http.site import SynapseRequest from synapse.types import StreamToken logger = logging.getLogger(__name__) @@ -25,38 +29,22 @@ logger = logging.getLogger(__name__) MAX_LIMIT = 1000 -class SourcePaginationConfig: - - """A configuration object which stores pagination parameters for a - specific event source.""" - - def __init__(self, from_key=None, to_key=None, direction="f", limit=None): - self.from_key = from_key - self.to_key = to_key - self.direction = "f" if direction == "f" else "b" - self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None - - def __repr__(self): - return "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)" % ( - self.from_key, - self.to_key, - self.direction, - self.limit, - ) - - +@attr.s(slots=True) class PaginationConfig: - """A configuration object which stores pagination parameters.""" - def __init__(self, from_token=None, to_token=None, direction="f", limit=None): - self.from_token = from_token - self.to_token = to_token - self.direction = "f" if direction == "f" else "b" - self.limit = min(int(limit), MAX_LIMIT) if limit is not None else None + from_token = attr.ib(type=Optional[StreamToken]) + to_token = attr.ib(type=Optional[StreamToken]) + direction = attr.ib(type=str) + limit = attr.ib(type=Optional[int]) @classmethod - def from_request(cls, request, raise_invalid_params=True, default_limit=None): + def from_request( + cls, + request: SynapseRequest, + raise_invalid_params: bool = True, + default_limit: Optional[int] = None, + ) -> "PaginationConfig": direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) from_tok = parse_string(request, "from") @@ -78,8 +66,11 @@ class PaginationConfig: limit = parse_integer(request, "limit", default=default_limit) - if limit and limit < 0: - raise SynapseError(400, "Limit must be 0 or above") + if limit: + if limit < 0: + raise SynapseError(400, "Limit must be 0 or above") + + limit = min(int(limit), MAX_LIMIT) try: return PaginationConfig(from_tok, to_tok, direction, limit) @@ -87,20 +78,10 @@ class PaginationConfig: logger.exception("Failed to create pagination config") raise SynapseError(400, "Invalid request.") - def __repr__(self): + def __repr__(self) -> str: return ("PaginationConfig(from_tok=%r, to_tok=%r, direction=%r, limit=%r)") % ( self.from_token, self.to_token, self.direction, self.limit, ) - - def get_source_config(self, source_name): - keyname = "%s_key" % source_name - - return SourcePaginationConfig( - from_key=getattr(self.from_token, keyname), - to_key=getattr(self.to_token, keyname) if self.to_token else None, - direction=self.direction, - limit=self.limit, - ) diff --git a/synapse/third_party_rules/__init__.py b/synapse/third_party_rules/__init__.py new file mode 100644
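With the attrs conversion, validating and clamping the limit now happens in `from_request` rather than in the constructor, so building a `PaginationConfig` directly no longer clamps anything. The behaviour the parsing code aims for is roughly the following sketch (a plain exception stands in for SynapseError):

    MAX_LIMIT = 1000

    def parse_limit(limit):
        # limit of None or 0  -> returned unchanged (no effective limit)
        # limit < 0           -> rejected with a 400
        # limit > MAX_LIMIT   -> silently clamped to MAX_LIMIT
        if limit:
            if limit < 0:
                raise ValueError("Limit must be 0 or above")
            limit = min(int(limit), MAX_LIMIT)
        return limit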
index 0000000000..1453d04571 --- /dev/null +++ b/synapse/third_party_rules/__init__.py
@@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/synapse/third_party_rules/access_rules.py b/synapse/third_party_rules/access_rules.py new file mode 100644
index 0000000000..2519e05ae0 --- /dev/null +++ b/synapse/third_party_rules/access_rules.py
@@ -0,0 +1,947 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import email.utils +import logging +from typing import Dict, List, Optional, Tuple + +from twisted.internet import defer + +from synapse.api.constants import EventTypes, JoinRules, Membership, RoomCreationPreset +from synapse.api.errors import SynapseError +from synapse.config._base import ConfigError +from synapse.events import EventBase +from synapse.module_api import ModuleApi +from synapse.types import Requester, StateMap, UserID, get_domain_from_id + +logger = logging.getLogger(__name__) + +ACCESS_RULES_TYPE = "im.vector.room.access_rules" + + +class AccessRules: + DIRECT = "direct" + RESTRICTED = "restricted" + UNRESTRICTED = "unrestricted" + + +VALID_ACCESS_RULES = ( + AccessRules.DIRECT, + AccessRules.RESTRICTED, + AccessRules.UNRESTRICTED, +) + +# Rules to which we need to apply the power levels restrictions. +# +# These are all of the rules that neither: +# * forbid users from joining based on a server blacklist (which means that there +# is no need to apply power level restrictions), nor +# * target direct chats (since we allow both users to be room admins in this case). +# +# The power-level restrictions, when they are applied, prevent the following: +# * the default power level for users (users_default) being set to anything other than 0. +# * a non-default power level being assigned to any user which would be forbidden from +# joining a restricted room. +RULES_WITH_RESTRICTED_POWER_LEVELS = (AccessRules.UNRESTRICTED,) + + +class RoomAccessRules(object): + """Implementation of the ThirdPartyEventRules module API that allows federation admins + to define custom rules for specific events and actions. + Implements the custom behaviour for the "im.vector.room.access_rules" state event. + + Takes a config in the format: + + third_party_event_rules: + module: third_party_rules.RoomAccessRules + config: + # List of domains (server names) that can't be invited to rooms if the + # "restricted" rule is set. Defaults to an empty list. + domains_forbidden_when_restricted: [] + + # Identity server to use when checking the HS an email address belongs to + # using the /info endpoint. Required. + id_server: "vector.im" + + Don't forget to consider if you can invite users from your own domain. + """ + + def __init__( + self, config: Dict, module_api: ModuleApi, + ): + self.id_server = config["id_server"] + self.module_api = module_api + + self.domains_forbidden_when_restricted = config.get( + "domains_forbidden_when_restricted", [] + ) + + @staticmethod + def parse_config(config: Dict) -> Dict: + """Parses and validates the options specified in the homeserver config. + + Args: + config: The config dict. + + Returns: + The config dict. + + Raises: + ConfigError: If there was an issue with the provided module configuration. 
+ """ + if "id_server" not in config: + raise ConfigError("No IS for event rules TchapEventRules") + + return config + + async def on_create_room( + self, requester: Requester, config: Dict, is_requester_admin: bool, + ) -> bool: + """Implements synapse.events.ThirdPartyEventRules.on_create_room. + + Checks if a im.vector.room.access_rules event is being set during room creation. + If yes, make sure the event is correct. Otherwise, append an event with the + default rule to the initial state. + + Checks if a m.rooms.power_levels event is being set during room creation. + If yes, make sure the event is allowed. Otherwise, set power_level_content_override + in the config dict to our modified version of the default room power levels. + + Args: + requester: The user who is making the createRoom request. + config: The createRoom config dict provided by the user. + is_requester_admin: Whether the requester is a Synapse admin. + + Returns: + Whether the request is allowed. + + Raises: + SynapseError: If the createRoom config dict is invalid or its contents blocked. + """ + is_direct = config.get("is_direct") + preset = config.get("preset") + access_rule = None + join_rule = None + + # If there's a rules event in the initial state, check if it complies with the + # spec for im.vector.room.access_rules and deny the request if not. + for event in config.get("initial_state", []): + if event["type"] == ACCESS_RULES_TYPE: + access_rule = event["content"].get("rule") + + # Make sure the event has a valid content. + if access_rule is None: + raise SynapseError(400, "Invalid access rule") + + # Make sure the rule name is valid. + if access_rule not in VALID_ACCESS_RULES: + raise SynapseError(400, "Invalid access rule") + + if (is_direct and access_rule != AccessRules.DIRECT) or ( + access_rule == AccessRules.DIRECT and not is_direct + ): + raise SynapseError(400, "Invalid access rule") + + if event["type"] == EventTypes.JoinRules: + join_rule = event["content"].get("join_rule") + + if access_rule is None: + # If there's no access rules event in the initial state, create one with the + # default setting. + if is_direct: + default_rule = AccessRules.DIRECT + else: + # If the default value for non-direct chat changes, we should make another + # case here for rooms created with either a "public" join_rule or the + # "public_chat" preset to make sure those keep defaulting to "restricted" + default_rule = AccessRules.RESTRICTED + + if not config.get("initial_state"): + config["initial_state"] = [] + + config["initial_state"].append( + { + "type": ACCESS_RULES_TYPE, + "state_key": "", + "content": {"rule": default_rule}, + } + ) + + access_rule = default_rule + + # Check that the preset in use is compatible with the access rule, whether it's + # user-defined or the default. + # + # Direct rooms may not have their join_rules set to JoinRules.PUBLIC. + if ( + join_rule == JoinRules.PUBLIC or preset == RoomCreationPreset.PUBLIC_CHAT + ) and access_rule == AccessRules.DIRECT: + raise SynapseError(400, "Invalid access rule") + + # Check if the creator can override values for the power levels. + allowed = self._is_power_level_content_allowed( + config.get("power_level_content_override", {}), access_rule + ) + if not allowed: + raise SynapseError(400, "Invalid power levels content override") + + use_default_power_levels = True + if config.get("power_level_content_override"): + use_default_power_levels = False + + # Second loop for events we need to know the current rule to process. 
+ for event in config.get("initial_state", []): + if event["type"] == EventTypes.PowerLevels: + allowed = self._is_power_level_content_allowed( + event["content"], access_rule + ) + if not allowed: + raise SynapseError(400, "Invalid power levels content") + + use_default_power_levels = False + + # If power levels were not overridden by the user, override with DINUM's preferred + # defaults instead + if use_default_power_levels: + config["power_level_content_override"] = self._get_default_power_levels( + requester.user.to_string() + ) + + return True + + # If power levels are not overridden by the user during room creation, the following + # rules are used instead. Changes from Synapse's default power levels are noted. + # + # The same power levels are currently applied regardless of room preset. + @staticmethod + def _get_default_power_levels(user_id: str) -> Dict: + return { + "users": {user_id: 100}, + "users_default": 0, + "events": { + EventTypes.Name: 50, + EventTypes.PowerLevels: 100, + EventTypes.RoomHistoryVisibility: 100, + EventTypes.CanonicalAlias: 50, + EventTypes.RoomAvatar: 50, + EventTypes.Tombstone: 100, + EventTypes.ServerACL: 100, + EventTypes.RoomEncryption: 100, + }, + "events_default": 0, + "state_default": 100, # Admins should be the only ones to perform other tasks + "ban": 50, + "kick": 50, + "redact": 50, + "invite": 50, # All rooms should require mod to invite, even private + } + + @defer.inlineCallbacks + def check_threepid_can_be_invited( + self, medium: str, address: str, state_events: StateMap[EventBase], + ) -> bool: + """Implements synapse.events.ThirdPartyEventRules.check_threepid_can_be_invited. + + Check if a threepid can be invited to the room via a 3PID invite given the current + rules and the threepid's address, by retrieving the HS it's mapped to from the + configured identity server, and checking if we can invite users from it. + + Args: + medium: The medium of the threepid. + address: The address of the threepid. + state_events: A dict mapping (event type, state key) to state event. + State events in the room the threepid is being invited to. + + Returns: + Whether the threepid invite is allowed. + """ + rule = self._get_rule_from_state(state_events) + + if medium != "email": + return False + + if rule != AccessRules.RESTRICTED: + # Only "restricted" requires filtering 3PID invites. We don't need to do + # anything for "direct" here, because only "restricted" requires filtering + # based on the HS the address is mapped to. + return True + + parsed_address = email.utils.parseaddr(address)[1] + if parsed_address != address: + # Avoid reproducing the security issue described here: + # https://matrix.org/blog/2019/04/18/security-update-sydent-1-0-2 + # It's probably not worth it but let's just be overly safe here. + return False + + # Get the HS this address belongs to from the identity server. + res = yield self.module_api.http_client.get_json( + "https://%s/_matrix/identity/api/v1/info" % (self.id_server,), + {"medium": medium, "address": address}, + ) + + # Look for a domain that's not forbidden from being invited. + if not res.get("hs"): + return False + if res.get("hs") in self.domains_forbidden_when_restricted: + return False + + return True + + async def check_event_allowed( + self, event: EventBase, state_events: StateMap[EventBase], + ) -> bool: + """Implements synapse.events.ThirdPartyEventRules.check_event_allowed. + + Checks the event's type and the current rule and calls the right function to + determine whether the event can be allowed. 
+ + Args: + event: The event to check. + state_events: A dict mapping (event type, state key) to state event. + State events in the room the event originated from. + + Returns: + True if the event can be allowed, False otherwise. + """ + if event.type == ACCESS_RULES_TYPE: + return await self._on_rules_change(event, state_events) + + # We need to know the rule to apply when processing the event types below. + rule = self._get_rule_from_state(state_events) + + if event.type == EventTypes.PowerLevels: + return self._is_power_level_content_allowed( + event.content, rule, on_room_creation=False + ) + + if event.type == EventTypes.Member or event.type == EventTypes.ThirdPartyInvite: + return await self._on_membership_or_invite(event, rule, state_events) + + if event.type == EventTypes.JoinRules: + return self._on_join_rule_change(event, rule) + + if event.type == EventTypes.RoomAvatar: + return self._on_room_avatar_change(event, rule) + + if event.type == EventTypes.Name: + return self._on_room_name_change(event, rule) + + if event.type == EventTypes.Topic: + return self._on_room_topic_change(event, rule) + + return True + + async def check_visibility_can_be_modified( + self, room_id: str, state_events: StateMap[EventBase], new_visibility: str + ) -> bool: + """Implements + synapse.events.ThirdPartyEventRules.check_visibility_can_be_modified + + Determines whether a room can be published, or removed from, the public room + list. A room is published if its visibility is set to "public". Otherwise, + its visibility is "private". A room with access rule other than "restricted" + may not be published. + + Args: + room_id: The ID of the room. + state_events: A dict mapping (event type, state key) to state event. + State events in the room. + new_visibility: The new visibility state. Either "public" or "private". + + Returns: + Whether the room is allowed to be published to, or removed from, the public + rooms directory. + """ + # We need to know the rule to apply when processing the event types below. + rule = self._get_rule_from_state(state_events) + + # Allow adding a room to the public rooms list only if it is restricted + if new_visibility == "public": + return rule == AccessRules.RESTRICTED + + # By default a room is created as "restricted", meaning it is allowed to be + # published to the public rooms directory. + return True + + async def _on_rules_change( + self, event: EventBase, state_events: StateMap[EventBase] + ): + """Checks whether an im.vector.room.access_rules event is forbidden or allowed. + + Args: + event: The im.vector.room.access_rules event. + state_events: A dict mapping (event type, state key) to state event. + State events in the room before the event was sent. + Returns: + True if the event can be allowed, False otherwise. + """ + new_rule = event.content.get("rule") + + # Check for invalid values. + if new_rule not in VALID_ACCESS_RULES: + return False + + # Make sure we don't apply "direct" if the room has more than two members. 
+ if new_rule == AccessRules.DIRECT: + existing_members, threepid_tokens = self._get_members_and_tokens_from_state( + state_events + ) + + if len(existing_members) > 2 or len(threepid_tokens) > 1: + return False + + if new_rule != AccessRules.RESTRICTED: + # Block this change if this room is currently listed in the public rooms + # directory + if await self.module_api.public_room_list_manager.room_is_in_public_room_list( + event.room_id + ): + return False + + prev_rules_event = state_events.get((ACCESS_RULES_TYPE, "")) + + # Now that we know the new rule doesn't break the "direct" case, we can allow any + # new rule in rooms that had none before. + if prev_rules_event is None: + return True + + prev_rule = prev_rules_event.content.get("rule") + + # Currently, we can only go from "restricted" to "unrestricted". + return ( + prev_rule == AccessRules.RESTRICTED and new_rule == AccessRules.UNRESTRICTED + ) + + async def _on_membership_or_invite( + self, event: EventBase, rule: str, state_events: StateMap[EventBase], + ) -> bool: + """Applies the correct rule for incoming m.room.member and + m.room.third_party_invite events. + + Args: + event: The event to check. + rule: The name of the rule to apply. + state_events: A dict mapping (event type, state key) to state event. + The state of the room before the event was sent. + + Returns: + True if the event can be allowed, False otherwise. + """ + if rule == AccessRules.RESTRICTED: + ret = self._on_membership_or_invite_restricted(event) + elif rule == AccessRules.UNRESTRICTED: + ret = self._on_membership_or_invite_unrestricted(event, state_events) + elif rule == AccessRules.DIRECT: + ret = self._on_membership_or_invite_direct(event, state_events) + else: + # We currently apply the default (restricted) if we don't know the rule, we + # might want to change that in the future. + ret = self._on_membership_or_invite_restricted(event) + + if event.type == "m.room.member": + # If this is an admin leaving, and they are the last admin in the room, + # raise the power levels of the room so that the room is 'frozen'. + # + # We have to freeze the room by puppeting an admin user, which we can + # only do for local users + if ( + self._is_local_user(event.sender) + and event.membership == Membership.LEAVE + ): + await self._freeze_room_if_last_admin_is_leaving(event, state_events) + + return ret + + async def _freeze_room_if_last_admin_is_leaving( + self, event: EventBase, state_events: StateMap[EventBase] + ): + power_level_state_event = state_events.get( + (EventTypes.PowerLevels, "") + ) # type: EventBase + if not power_level_state_event: + return + power_level_content = power_level_state_event.content + + # Do some validation checks on the power level state event + if ( + not isinstance(power_level_content, dict) + or "users" not in power_level_content + or not isinstance(power_level_content["users"], dict) + ): + # We can't use this power level event to determine whether the room should be + # frozen. Bail out. 
+ return + + user_id = event.get("sender") + if not user_id: + return + + # Get every admin user defined in the room's state + admin_users = { + user + for user, power_level in power_level_content["users"].items() + if power_level >= 100 + } + + if user_id not in admin_users: + # This user is not an admin, ignore them + return + + if any( + event_type == EventTypes.Member + and event.membership in [Membership.JOIN, Membership.INVITE] + and state_key in admin_users + and state_key != user_id + for (event_type, state_key), event in state_events.items() + ): + # There's another admin user in, or invited to, the room + return + + # Freeze the room by raising the required power level to send events to 100 + logger.info("Freezing room '%s'", event.room_id) + + # Modify the existing power levels to raise all required types to 100 + # + # This changes a power level state event's content from something like: + # { + # "redact": 50, + # "state_default": 50, + # "ban": 50, + # "notifications": { + # "room": 50 + # }, + # "events": { + # "m.room.avatar": 50, + # "m.room.encryption": 50, + # "m.room.canonical_alias": 50, + # "m.room.name": 50, + # "im.vector.modular.widgets": 50, + # "m.room.topic": 50, + # "m.room.tombstone": 50, + # "m.room.history_visibility": 100, + # "m.room.power_levels": 100 + # }, + # "users_default": 0, + # "events_default": 0, + # "users": { + # "@admin:example.com": 100, + # }, + # "kick": 50, + # "invite": 0 + # } + # + # to + # + # { + # "redact": 100, + # "state_default": 100, + # "ban": 100, + # "notifications": { + # "room": 50 + # }, + # "events": {} + # "users_default": 0, + # "events_default": 100, + # "users": { + # "@admin:example.com": 100, + # }, + # "kick": 100, + # "invite": 100 + # } + new_content = {} + for key, value in power_level_content.items(): + # Do not change "users_default", as that key specifies the default power + # level of new users + if isinstance(value, int) and key != "users_default": + value = 100 + new_content[key] = value + + # Set some values in case they are missing from the original + # power levels event content + new_content.update( + { + # Clear out any special-cased event keys + "events": {}, + # Ensure state_default and events_default keys exist and are 100. + # Otherwise a lower PL user could potentially send state events that + # aren't explicitly mentioned elsewhere in the power level dict + "state_default": 100, + "events_default": 100, + # Membership events default to 50 if they aren't present. Set them + # to 100 here, as they would be set to 100 if they were present anyways + "ban": 100, + "kick": 100, + "invite": 100, + "redact": 100, + } + ) + + await self.module_api.create_and_send_event_into_room( + { + "room_id": event.room_id, + "sender": user_id, + "type": EventTypes.PowerLevels, + "content": new_content, + "state_key": "", + } + ) + + def _on_membership_or_invite_restricted(self, event: EventBase) -> bool: + """Implements the checks and behaviour specified for the "restricted" rule. + + "restricted" currently means that users can only invite users if their server is + included in a limited list of domains. + + Args: + event: The event to check. + + Returns: + True if the event can be allowed, False otherwise. + """ + # We're not applying the rules on m.room.third_party_member events here because + # the filtering on threepids is done in check_threepid_can_be_invited, which is + # called before check_event_allowed. 
+ if event.type == EventTypes.ThirdPartyInvite: + return True + + # We only need to process "join" and "invite" memberships, in order to be backward + # compatible, e.g. if a user from a blacklisted server joined a restricted room + # before the rules started being enforced on the server, that user must be able to + # leave it. + if event.membership not in [Membership.JOIN, Membership.INVITE]: + return True + + invitee_domain = get_domain_from_id(event.state_key) + return invitee_domain not in self.domains_forbidden_when_restricted + + def _on_membership_or_invite_unrestricted( + self, event: EventBase, state_events: StateMap[EventBase] + ) -> bool: + """Implements the checks and behaviour specified for the "unrestricted" rule. + + "unrestricted" currently means that forbidden users cannot join without an invite. + + Returns: + True if the event can be allowed, False otherwise. + """ + # If this is a join from a forbidden user and they don't have an invite to the + # room, then deny it + if event.type == EventTypes.Member and event.membership == Membership.JOIN: + # Check if this user is from a forbidden server + target_domain = get_domain_from_id(event.state_key) + if target_domain in self.domains_forbidden_when_restricted: + # If so, they'll need an invite to join this room. Check if one exists + if not self._user_is_invited_to_room(event.state_key, state_events): + return False + + return True + + def _on_membership_or_invite_direct( + self, event: EventBase, state_events: StateMap[EventBase], + ) -> bool: + """Implements the checks and behaviour specified for the "direct" rule. + + "direct" currently means that no member is allowed apart from the two initial + members the room was created for (i.e. the room's creator and their first invitee). + + Args: + event: The event to check. + state_events: A dict mapping (event type, state key) to state event. + The state of the room before the event was sent. + + Returns: + True if the event can be allowed, False otherwise. + """ + # Get the room memberships and 3PID invite tokens from the room's state. + existing_members, threepid_tokens = self._get_members_and_tokens_from_state( + state_events + ) + + # There should never be more than one 3PID invite in the room state: if the second + # original user came and left, and we're inviting them using their email address, + # given we know they have a Matrix account binded to the address (so they could + # join the first time), Synapse will successfully look it up before attempting to + # store an invite on the IS. + if len(threepid_tokens) == 1 and event.type == EventTypes.ThirdPartyInvite: + # If we already have a 3PID invite in flight, don't accept another one, unless + # the new one has the same invite token as its state key. This is because 3PID + # invite revocations must be allowed, and a revocation is basically a new 3PID + # invite event with an empty content and the same token as the invite it + # revokes. + return event.state_key in threepid_tokens + + if len(existing_members) == 2: + # If the user was within the two initial user of the room, Synapse would have + # looked it up successfully and thus sent a m.room.member here instead of + # m.room.third_party_invite. + if event.type == EventTypes.ThirdPartyInvite: + return False + + # We can only have m.room.member events here. The rule in this case is to only + # allow the event if its target is one of the initial two members in the room, + # i.e. the state key of one of the two m.room.member states in the room. 
+ return event.state_key in existing_members + + # We're alone in the room (and always have been) and there's one 3PID invite in + # flight. + if len(existing_members) == 1 and len(threepid_tokens) == 1: + # We can only have m.room.member events here. In this case, we can only allow + # the event if it's either a m.room.member from the joined user (we can assume + # that the only m.room.member event is a join otherwise we wouldn't be able to + # send an event to the room) or an an invite event which target is the invited + # user. + target = event.state_key + is_from_threepid_invite = self._is_invite_from_threepid( + event, threepid_tokens[0] + ) + return is_from_threepid_invite or target == existing_members[0] + + return True + + def _is_power_level_content_allowed( + self, content: Dict, access_rule: str, on_room_creation: bool = True + ) -> bool: + """Check if a given power levels event is permitted under the given access rule. + + It shouldn't be allowed if it either changes the default PL to a non-0 value or + gives a non-0 PL to a user that would have been forbidden from joining the room + under a more restrictive access rule. + + Args: + content: The content of the m.room.power_levels event to check. + access_rule: The access rule in place in this room. + on_room_creation: True if this call is happening during a room's + creation, False otherwise. + + Returns: + Whether the content of the power levels event is valid. + """ + # Only enforce these rules during room creation + # + # We want to allow admins to modify or fix the power levels in a room if they + # have a special circumstance, but still want to encourage a certain pattern during + # room creation. + if on_room_creation: + # If invite requirements are <PL50 + if content.get("invite", 50) < 50: + return False + + # If "other" state requirements are <PL100 + if content.get("state_default", 100) < 100: + return False + + # Check if we need to apply the restrictions with the current rule. + if access_rule not in RULES_WITH_RESTRICTED_POWER_LEVELS: + return True + + # If users_default is explicitly set to a non-0 value, deny the event. + users_default = content.get("users_default", 0) + if users_default: + return False + + users = content.get("users", {}) + for user_id, power_level in users.items(): + server_name = get_domain_from_id(user_id) + # Check the domain against the blacklist. If found, and the PL isn't 0, deny + # the event. + if ( + server_name in self.domains_forbidden_when_restricted + and power_level != 0 + ): + return False + + return True + + def _on_join_rule_change(self, event: EventBase, rule: str) -> bool: + """Check whether a join rule change is allowed. + + A join rule change is always allowed unless the new join rule is "public" and + the current access rule is "direct". + + Args: + event: The event to check. + rule: The name of the rule to apply. + + Returns: + Whether the change is allowed. + """ + if event.content.get("join_rule") == JoinRules.PUBLIC: + return rule != AccessRules.DIRECT + + return True + + def _on_room_avatar_change(self, event: EventBase, rule: str) -> bool: + """Check whether a change of room avatar is allowed. + The current rule is to forbid such a change in direct chats but allow it + everywhere else. + + Args: + event: The event to check. + rule: The name of the rule to apply. + + Returns: + True if the event can be allowed, False otherwise. 
+ """ + return rule != AccessRules.DIRECT + + def _on_room_name_change(self, event: EventBase, rule: str) -> bool: + """Check whether a change of room name is allowed. + The current rule is to forbid such a change in direct chats but allow it + everywhere else. + + Args: + event: The event to check. + rule: The name of the rule to apply. + + Returns: + True if the event can be allowed, False otherwise. + """ + return rule != AccessRules.DIRECT + + def _on_room_topic_change(self, event: EventBase, rule: str) -> bool: + """Check whether a change of room topic is allowed. + The current rule is to forbid such a change in direct chats but allow it + everywhere else. + + Args: + event: The event to check. + rule: The name of the rule to apply. + + Returns: + True if the event can be allowed, False otherwise. + """ + return rule != AccessRules.DIRECT + + @staticmethod + def _get_rule_from_state(state_events: StateMap[EventBase]) -> Optional[str]: + """Extract the rule to be applied from the given set of state events. + + Args: + state_events: A dict mapping (event type, state key) to state event. + + Returns: + The name of the rule (either "direct", "restricted" or "unrestricted") if found, + else None. + """ + access_rules = state_events.get((ACCESS_RULES_TYPE, "")) + if access_rules is None: + return AccessRules.RESTRICTED + + return access_rules.content.get("rule") + + @staticmethod + def _get_join_rule_from_state(state_events: StateMap[EventBase]) -> Optional[str]: + """Extract the room's join rule from the given set of state events. + + Args: + state_events (dict[tuple[event type, state key], EventBase]): The set of state + events. + + Returns: + The name of the join rule (either "public", or "invite") if found, else None. + """ + join_rule_event = state_events.get((EventTypes.JoinRules, "")) + if join_rule_event is None: + return None + + return join_rule_event.content.get("join_rule") + + @staticmethod + def _get_members_and_tokens_from_state( + state_events: StateMap[EventBase], + ) -> Tuple[List[str], List[str]]: + """Retrieves the list of users that have a m.room.member event in the room, + as well as 3PID invites tokens in the room. + + Args: + state_events: A dict mapping (event type, state key) to state event. + + Returns: + A tuple containing the: + * targets of the m.room.member events in the state. + * 3PID invite tokens in the state. + """ + existing_members = [] + threepid_invite_tokens = [] + for key, state_event in state_events.items(): + if key[0] == EventTypes.Member and state_event.content: + existing_members.append(state_event.state_key) + if key[0] == EventTypes.ThirdPartyInvite and state_event.content: + # Don't include revoked invites. + threepid_invite_tokens.append(state_event.state_key) + + return existing_members, threepid_invite_tokens + + @staticmethod + def _is_invite_from_threepid(invite: EventBase, threepid_invite_token: str) -> bool: + """Checks whether the given invite follows the given 3PID invite. + + Args: + invite: The m.room.member event with "invite" membership. + threepid_invite_token: The state key from the 3PID invite. + + Returns: + Whether the invite is due to the given 3PID invite. + """ + token = ( + invite.content.get("third_party_invite", {}) + .get("signed", {}) + .get("token", "") + ) + + return token == threepid_invite_token + + def _is_local_user(self, user_id: str) -> bool: + """Checks whether a given user ID belongs to this homeserver, or a remote + + Args: + user_id: A user ID to check. 
+ + Returns: + True if the user belongs to this homeserver, False otherwise. + """ + user = UserID.from_string(user_id) + + # Extract the localpart and ask the module API for a user ID from the localpart + # The module API will append the local homeserver's server_name + local_user_id = self.module_api.get_qualified_user_id(user.localpart) + + # If the user ID we get based on the localpart is the same as the original user ID, + # then they were a local user + return user_id == local_user_id + + def _user_is_invited_to_room( + self, user_id: str, state_events: StateMap[EventBase] + ) -> bool: + """Checks whether a given user has been invited to a room + + A user has an invite for a room if its state contains a `m.room.member` + event with membership "invite" and their user ID as the state key. + + Args: + user_id: The user to check. + state_events: The state events from the room. + + Returns: + True if the user has been invited to the room, or False if they haven't. + """ + for (event_type, state_key), state_event in state_events.items(): + if ( + event_type == EventTypes.Member + and state_key == user_id + and state_event.membership == Membership.INVITE + ): + return True + + return False diff --git a/synapse/types.py b/synapse/types.py
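Because `_is_power_level_content_allowed` only inspects the proposed event content and the current access rule, the power-level restrictions can be exercised without a running homeserver. A rough sketch (constructing the module with a stub config and no real ModuleApi is an assumption that only works because `__init__` does nothing beyond reading the config):

    rules = RoomAccessRules({"id_server": "vector.im"}, module_api=None)

    # Under "unrestricted", a non-zero users_default is rejected...
    assert not rules._is_power_level_content_allowed(
        {"users_default": 50}, AccessRules.UNRESTRICTED
    )
    # ...while the default of 0 with no per-user overrides is acceptable.
    assert rules._is_power_level_content_allowed({}, AccessRules.UNRESTRICTED)
    # "restricted" rooms skip the per-user power-level checks entirely.
    assert rules._is_power_level_content_allowed(
        {"users_default": 50}, AccessRules.RESTRICTED
    )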
index f7de48f148..2061429c90 100644 --- a/synapse/types.py +++ b/synapse/types.py
@@ -18,10 +18,11 @@ import re import string import sys from collections import namedtuple -from typing import Any, Dict, Mapping, MutableMapping, Tuple, Type, TypeVar +from typing import Any, Dict, Mapping, MutableMapping, Optional, Tuple, Type, TypeVar import attr from signedjson.key import decode_verify_key_bytes +from six.moves import filter from unpaddedbase64 import decode_base64 from synapse.api.errors import Codes, SynapseError @@ -296,6 +297,19 @@ def contains_invalid_mxid_characters(localpart): return any(c not in mxid_localpart_allowed_characters for c in localpart) +def strip_invalid_mxid_characters(localpart): + """Removes any invalid characters from an mxid + + Args: + localpart (basestring): the localpart to be stripped + + Returns: + localpart (basestring): the localpart having been stripped + """ + filtered = filter(lambda c: c in mxid_localpart_allowed_characters, localpart) + return "".join(filtered) + + UPPER_CASE_PATTERN = re.compile(b"[A-Z_]") # the following is a pattern which matches '=', and bytes which are not allowed in a mxid @@ -362,22 +376,79 @@ def map_username_to_mxid_localpart(username, case_sensitive=False): return username.decode("ascii") -class StreamToken( - namedtuple( - "Token", - ( - "room_key", - "presence_key", - "typing_key", - "receipt_key", - "account_data_key", - "push_rules_key", - "to_device_key", - "device_list_key", - "groups_key", - ), +@attr.s(frozen=True, slots=True) +class RoomStreamToken: + """Tokens are positions between events. The token "s1" comes after event 1. + + s0 s1 + | | + [0] V [1] V [2] + + Tokens can either be a point in the live event stream or a cursor going + through historic events. + + When traversing the live event stream events are ordered by when they + arrived at the homeserver. + + When traversing historic events the events are ordered by their depth in + the event graph "topological_ordering" and then by when they arrived at the + homeserver "stream_ordering". + + Live tokens start with an "s" followed by the "stream_ordering" id of the + event it comes after. Historic tokens start with a "t" followed by the + "topological_ordering" id of the event it comes after, followed by "-", + followed by the "stream_ordering" id of the event it comes after. 
+ """ + + topological = attr.ib( + type=Optional[int], + validator=attr.validators.optional(attr.validators.instance_of(int)), ) -): + stream = attr.ib(type=int, validator=attr.validators.instance_of(int)) + + @classmethod + def parse(cls, string: str) -> "RoomStreamToken": + try: + if string[0] == "s": + return cls(topological=None, stream=int(string[1:])) + if string[0] == "t": + parts = string[1:].split("-", 1) + return cls(topological=int(parts[0]), stream=int(parts[1])) + except Exception: + pass + raise SynapseError(400, "Invalid token %r" % (string,)) + + @classmethod + def parse_stream_token(cls, string: str) -> "RoomStreamToken": + try: + if string[0] == "s": + return cls(topological=None, stream=int(string[1:])) + except Exception: + pass + raise SynapseError(400, "Invalid token %r" % (string,)) + + def as_tuple(self) -> Tuple[Optional[int], int]: + return (self.topological, self.stream) + + def __str__(self) -> str: + if self.topological is not None: + return "t%d-%d" % (self.topological, self.stream) + else: + return "s%d" % (self.stream,) + + +@attr.s(slots=True, frozen=True) +class StreamToken: + room_key = attr.ib(type=str) + presence_key = attr.ib(type=int) + typing_key = attr.ib(type=int) + receipt_key = attr.ib(type=int) + account_data_key = attr.ib(type=int) + push_rules_key = attr.ib(type=int) + to_device_key = attr.ib(type=int) + device_list_key = attr.ib(type=int) + groups_key = attr.ib(type=int) + _SEPARATOR = "_" START = None # type: StreamToken @@ -385,15 +456,15 @@ class StreamToken( def from_string(cls, string): try: keys = string.split(cls._SEPARATOR) - while len(keys) < len(cls._fields): + while len(keys) < len(attr.fields(cls)): # i.e. old token from before receipt_key keys.append("0") - return cls(*keys) + return cls(keys[0], *(int(k) for k in keys[1:])) except Exception: raise SynapseError(400, "Invalid Token") def to_string(self): - return self._SEPARATOR.join([str(k) for k in self]) + return self._SEPARATOR.join([str(k) for k in attr.astuple(self)]) @property def room_stream_id(self): @@ -435,63 +506,10 @@ class StreamToken( return self def copy_and_replace(self, key, new_value): - return self._replace(**{key: new_value}) - - -StreamToken.START = StreamToken(*(["s0"] + ["0"] * (len(StreamToken._fields) - 1))) + return attr.evolve(self, **{key: new_value}) -class RoomStreamToken(namedtuple("_StreamToken", "topological stream")): - """Tokens are positions between events. The token "s1" comes after event 1. - - s0 s1 - | | - [0] V [1] V [2] - - Tokens can either be a point in the live event stream or a cursor going - through historic events. - - When traversing the live event stream events are ordered by when they - arrived at the homeserver. - - When traversing historic events the events are ordered by their depth in - the event graph "topological_ordering" and then by when they arrived at the - homeserver "stream_ordering". - - Live tokens start with an "s" followed by the "stream_ordering" id of the - event it comes after. Historic tokens start with a "t" followed by the - "topological_ordering" id of the event it comes after, followed by "-", - followed by the "stream_ordering" id of the event it comes after. 
- """ - - __slots__ = [] # type: list - - @classmethod - def parse(cls, string): - try: - if string[0] == "s": - return cls(topological=None, stream=int(string[1:])) - if string[0] == "t": - parts = string[1:].split("-", 1) - return cls(topological=int(parts[0]), stream=int(parts[1])) - except Exception: - pass - raise SynapseError(400, "Invalid token %r" % (string,)) - - @classmethod - def parse_stream_token(cls, string): - try: - if string[0] == "s": - return cls(topological=None, stream=int(string[1:])) - except Exception: - pass - raise SynapseError(400, "Invalid token %r" % (string,)) - - def __str__(self): - if self.topological is not None: - return "t%d-%d" % (self.topological, self.stream) - else: - return "s%d" % (self.stream,) +StreamToken.START = StreamToken.from_string("s0_0") class ThirdPartyInstanceID( diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
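The synapse/types.py hunk above replaces the namedtuple-based RoomStreamToken and StreamToken with attrs classes while keeping the wire format unchanged. A short sketch of the round-tripping behaviour the new parse/__str__/from_string/to_string methods preserve; the token values are made up for illustration:

    from synapse.types import RoomStreamToken, StreamToken

    # Live tokens: "s" + stream_ordering of the event they come after.
    live = RoomStreamToken.parse("s2633508")
    assert (live.topological, live.stream) == (None, 2633508)
    assert str(live) == "s2633508"

    # Historic tokens: "t" + topological_ordering + "-" + stream_ordering.
    historic = RoomStreamToken.parse("t426-2633508")
    assert historic.as_tuple() == (426, 2633508)
    assert str(historic) == "t426-2633508"

    # StreamToken keeps its "_"-separated serialisation; room_key stays a
    # string, while the remaining keys are now parsed as ints.
    token = StreamToken.from_string("s72594_4483_1934_12_3_1_1_20_1")
    assert token.room_key == "s72594" and token.presence_key == 4483
    assert token.to_string() == "s72594_4483_1934_12_3_1_1_20_1"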
index a13f11f8d8..60ecc498ab 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py
@@ -13,11 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import logging import re import attr -from canonicaljson import json from twisted.internet import defer, task diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index bb57e27beb..67ce9a5f39 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py
@@ -17,13 +17,25 @@ import collections import logging from contextlib import contextmanager -from typing import Dict, Sequence, Set, Union +from typing import ( + Any, + Callable, + Dict, + Hashable, + Iterable, + List, + Optional, + Set, + TypeVar, + Union, +) import attr from typing_extensions import ContextManager from twisted.internet import defer from twisted.internet.defer import CancelledError +from twisted.internet.interfaces import IReactorTime from twisted.python import failure from synapse.logging.context import ( @@ -54,7 +66,7 @@ class ObservableDeferred: __slots__ = ["_deferred", "_observers", "_result"] - def __init__(self, deferred, consumeErrors=False): + def __init__(self, deferred: defer.Deferred, consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) object.__setattr__(self, "_result", None) object.__setattr__(self, "_observers", set()) @@ -111,25 +123,25 @@ class ObservableDeferred: success, res = self._result return defer.succeed(res) if success else defer.fail(res) - def observers(self): + def observers(self) -> List[defer.Deferred]: return self._observers - def has_called(self): + def has_called(self) -> bool: return self._result is not None - def has_succeeded(self): + def has_succeeded(self) -> bool: return self._result is not None and self._result[0] is True - def get_result(self): + def get_result(self) -> Any: return self._result[1] - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: return getattr(self._deferred, name) - def __setattr__(self, name, value): + def __setattr__(self, name: str, value: Any) -> None: setattr(self._deferred, name, value) - def __repr__(self): + def __repr__(self) -> str: return "<ObservableDeferred object at %s, result=%r, _deferred=%r>" % ( id(self), self._result, @@ -137,18 +149,20 @@ class ObservableDeferred: ) -def concurrently_execute(func, args, limit): - """Executes the function with each argument conncurrently while limiting +def concurrently_execute( + func: Callable, args: Iterable[Any], limit: int +) -> defer.Deferred: + """Executes the function with each argument concurrently while limiting the number of concurrent executions. Args: - func (func): Function to execute, should return a deferred or coroutine. - args (Iterable): List of arguments to pass to func, each invocation of func + func: Function to execute, should return a deferred or coroutine. + args: List of arguments to pass to func, each invocation of func gets a single argument. - limit (int): Maximum number of conccurent executions. + limit: Maximum number of conccurent executions. Returns: - deferred: Resolved when all function invocations have finished. + Deferred[list]: Resolved when all function invocations have finished. """ it = iter(args) @@ -167,14 +181,17 @@ def concurrently_execute(func, args, limit): ).addErrback(unwrapFirstError) -def yieldable_gather_results(func, iter, *args, **kwargs): +def yieldable_gather_results( + func: Callable, iter: Iterable, *args: Any, **kwargs: Any +) -> defer.Deferred: """Executes the function with each argument concurrently. 
Args: - func (func): Function to execute that returns a Deferred - iter (iter): An iterable that yields items that get passed as the first + func: Function to execute that returns a Deferred + iter: An iterable that yields items that get passed as the first argument to the function *args: Arguments to be passed to each call to func + **kwargs: Keyword arguments to be passed to each call to func Returns Deferred[list]: Resolved when all functions have been invoked, or errors if @@ -188,24 +205,37 @@ def yieldable_gather_results(func, iter, *args, **kwargs): ).addErrback(unwrapFirstError) +@attr.s(slots=True) +class _LinearizerEntry: + # The number of things executing. + count = attr.ib(type=int) + # Deferreds for the things blocked from executing. + deferreds = attr.ib(type=collections.OrderedDict) + + class Linearizer: """Limits concurrent access to resources based on a key. Useful to ensure only a few things happen at a time on a given resource. Example: - with (yield limiter.queue("test_key")): + with await limiter.queue("test_key"): # do some work. """ - def __init__(self, name=None, max_count=1, clock=None): + def __init__( + self, + name: Optional[str] = None, + max_count: int = 1, + clock: Optional[Clock] = None, + ): """ Args: - max_count(int): The maximum number of concurrent accesses + max_count: The maximum number of concurrent accesses """ if name is None: - self.name = id(self) + self.name = id(self) # type: Union[str, int] else: self.name = name @@ -216,15 +246,10 @@ class Linearizer: self._clock = clock self.max_count = max_count - # key_to_defer is a map from the key to a 2 element list where - # the first element is the number of things executing, and - # the second element is an OrderedDict, where the keys are deferreds for the - # things blocked from executing. - self.key_to_defer = ( - {} - ) # type: Dict[str, Sequence[Union[int, Dict[defer.Deferred, int]]]] + # key_to_defer is a map from the key to a _LinearizerEntry. + self.key_to_defer = {} # type: Dict[Hashable, _LinearizerEntry] - def is_queued(self, key) -> bool: + def is_queued(self, key: Hashable) -> bool: """Checks whether there is a process queued up waiting """ entry = self.key_to_defer.get(key) @@ -234,25 +259,27 @@ class Linearizer: # There are waiting deferreds only in the OrderedDict of deferreds is # non-empty. - return bool(entry[1]) + return bool(entry.deferreds) - def queue(self, key): + def queue(self, key: Hashable) -> defer.Deferred: # we avoid doing defer.inlineCallbacks here, so that cancellation works correctly. # (https://twistedmatrix.com/trac/ticket/4632 meant that cancellations were not # propagated inside inlineCallbacks until Twisted 18.7) - entry = self.key_to_defer.setdefault(key, [0, collections.OrderedDict()]) + entry = self.key_to_defer.setdefault( + key, _LinearizerEntry(0, collections.OrderedDict()) + ) # If the number of things executing is greater than the maximum # then add a deferred to the list of blocked items # When one of the things currently executing finishes it will callback # this item so that it can continue executing. 
- if entry[0] >= self.max_count: + if entry.count >= self.max_count: res = self._await_lock(key) else: logger.debug( "Acquired uncontended linearizer lock %r for key %r", self.name, key ) - entry[0] += 1 + entry.count += 1 res = defer.succeed(None) # once we successfully get the lock, we need to return a context manager which @@ -267,15 +294,15 @@ class Linearizer: # We've finished executing so check if there are any things # blocked waiting to execute and start one of them - entry[0] -= 1 + entry.count -= 1 - if entry[1]: - (next_def, _) = entry[1].popitem(last=False) + if entry.deferreds: + (next_def, _) = entry.deferreds.popitem(last=False) # we need to run the next thing in the sentinel context. with PreserveLoggingContext(): next_def.callback(None) - elif entry[0] == 0: + elif entry.count == 0: # We were the last thing for this key: remove it from the # map. del self.key_to_defer[key] @@ -283,7 +310,7 @@ class Linearizer: res.addCallback(_ctx_manager) return res - def _await_lock(self, key): + def _await_lock(self, key: Hashable) -> defer.Deferred: """Helper for queue: adds a deferred to the queue Assumes that we've already checked that we've reached the limit of the number @@ -298,11 +325,11 @@ class Linearizer: logger.debug("Waiting to acquire linearizer lock %r for key %r", self.name, key) new_defer = make_deferred_yieldable(defer.Deferred()) - entry[1][new_defer] = 1 + entry.deferreds[new_defer] = 1 def cb(_r): logger.debug("Acquired linearizer lock %r for key %r", self.name, key) - entry[0] += 1 + entry.count += 1 # if the code holding the lock completes synchronously, then it # will recursively run the next claimant on the list. That can @@ -331,7 +358,7 @@ class Linearizer: ) # we just have to take ourselves back out of the queue. - del entry[1][new_defer] + del entry.deferreds[new_defer] return e new_defer.addCallbacks(cb, eb) @@ -419,14 +446,22 @@ class ReadWriteLock: return _ctx_manager() -def _cancelled_to_timed_out_error(value, timeout): +R = TypeVar("R") + + +def _cancelled_to_timed_out_error(value: R, timeout: float) -> R: if isinstance(value, failure.Failure): value.trap(CancelledError) raise defer.TimeoutError(timeout, "Deferred") return value -def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): +def timeout_deferred( + deferred: defer.Deferred, + timeout: float, + reactor: IReactorTime, + on_timeout_cancel: Optional[Callable[[Any, float], Any]] = None, +) -> defer.Deferred: """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new deferred that wraps and times out the given deferred, correctly handling @@ -437,10 +472,10 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred Args: - deferred (Deferred) - timeout (float): Timeout in seconds - reactor (twisted.interfaces.IReactorTime): The twisted reactor to use - on_timeout_cancel (callable): A callable which is called immediately + deferred: The Deferred to potentially timeout. + timeout: Timeout in seconds + reactor: The twisted reactor to use + on_timeout_cancel: A callable which is called immediately after the deferred times out, and not if this deferred is otherwise cancelled before the timeout. @@ -452,7 +487,7 @@ def timeout_deferred(deferred, timeout, reactor, on_timeout_cancel=None): CancelledError Failure into a defer.TimeoutError. Returns: - Deferred + A new Deferred. 
""" new_d = defer.Deferred() diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index a750261e77..f73e95393c 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py
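The async_helpers changes above are mostly type annotations, but the Linearizer docstring also switches its example from `with (yield limiter.queue(...))` to `with await limiter.queue(...)`, matching async/await call sites. A minimal usage sketch, assuming it runs inside Synapse's reactor and logging contexts as the real callers do; the key and the body of the critical section are hypothetical:

    from synapse.util.async_helpers import Linearizer

    linearizer = Linearizer(name="example", max_count=1)

    async def do_work_for_room(room_id: str) -> None:
        # queue() resolves to a context manager; at most max_count callers may
        # hold the lock for a given key at the same time.
        with await linearizer.queue(room_id):
            ...  # critical section for this room_id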
@@ -16,8 +16,6 @@ import inspect import logging from twisted.internet import defer -from twisted.internet.defer import Deferred, fail, succeed -from twisted.python import failure from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process @@ -29,11 +27,6 @@ def user_left_room(distributor, user, room_id): distributor.fire("user_left_room", user=user, room_id=room_id) -# XXX: this is no longer used. We should probably kill it. -def user_joined_room(distributor, user, room_id): - distributor.fire("user_joined_room", user=user, room_id=room_id) - - class Distributor: """A central dispatch point for loosely-connected pieces of code to register, observe, and fire signals. @@ -81,28 +74,6 @@ class Distributor: run_as_background_process(name, self.signals[name].fire, *args, **kwargs) -def maybeAwaitableDeferred(f, *args, **kw): - """ - Invoke a function that may or may not return a Deferred or an Awaitable. - - This is a modified version of twisted.internet.defer.maybeDeferred. - """ - try: - result = f(*args, **kw) - except Exception: - return fail(failure.Failure(captureVars=Deferred.debug)) - - if isinstance(result, Deferred): - return result - # Handle the additional case of an awaitable being returned. - elif inspect.isawaitable(result): - return defer.ensureDeferred(result) - elif isinstance(result, failure.Failure): - return fail(result) - else: - return succeed(result) - - class Signal: """A Signal is a dispatch point that stores a list of callables as observers of it. @@ -132,22 +103,17 @@ class Signal: Returns a Deferred that will complete when all the observers have completed.""" - def do(observer): - def eb(failure): + async def do(observer): + try: + result = observer(*args, **kwargs) + if inspect.isawaitable(result): + result = await result + return result + except Exception as e: logger.warning( - "%s signal observer %s failed: %r", - self.name, - observer, - failure, - exc_info=( - failure.type, - failure.value, - failure.getTracebackObject(), - ), + "%s signal observer %s failed: %r", self.name, observer, e, ) - return maybeAwaitableDeferred(observer, *args, **kwargs).addErrback(eb) - deferreds = [run_in_background(do, o) for o in self.observers] return make_deferred_yieldable( diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py
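The Signal.fire rewrite above drops maybeAwaitableDeferred in favour of a small async wrapper that calls the observer, awaits the result if it is awaitable, and logs any exception. The same dispatch pattern in isolation, as a standalone sketch with hypothetical observer functions:

    import asyncio
    import inspect
    import logging

    logger = logging.getLogger(__name__)

    async def call_observer(observer, *args, **kwargs):
        # Mirrors the new Signal.do() wrapper: call, await if needed, log failures.
        try:
            result = observer(*args, **kwargs)
            if inspect.isawaitable(result):
                result = await result
            return result
        except Exception as e:
            logger.warning("signal observer %s failed: %r", observer, e)

    def plain_observer(user):
        return "plain saw %s" % (user,)

    async def async_observer(user):
        return "async saw %s" % (user,)

    print(asyncio.run(call_observer(plain_observer, "@alice:test")))
    print(asyncio.run(call_observer(async_observer, "@alice:test")))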
index 0e445e01d7..bf094c9386 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py
@@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from canonicaljson import json +import json + from frozendict import frozendict @@ -66,5 +67,5 @@ def _handle_frozendict(obj): # A JSONEncoder which is capable of encoding frozendicts without barfing. # Additionally reduce the whitespace produced by JSON encoding. frozendict_json_encoder = json.JSONEncoder( - default=_handle_frozendict, separators=(",", ":"), + allow_nan=False, separators=(",", ":"), default=_handle_frozendict, ) diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
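frozenutils.py now builds its encoder on the stdlib json module, adding allow_nan=False to keep canonicaljson's stricter behaviour while retaining the frozendict default hook and compact separators. A small sketch of the same construction; the _handle_frozendict body below is a simplified stand-in for the real helper:

    import json

    from frozendict import frozendict

    def _handle_frozendict(obj):
        # Simplified stand-in: the real helper unwraps the frozendict's
        # underlying dict rather than copying it.
        if isinstance(obj, frozendict):
            return dict(obj)
        raise TypeError("Object of type %s is not JSON serializable" % type(obj).__name__)

    encoder = json.JSONEncoder(
        allow_nan=False, separators=(",", ":"), default=_handle_frozendict,
    )

    print(encoder.encode(frozendict({"msgtype": "m.text", "body": "hi"})))
    # -> {"msgtype":"m.text","body":"hi"}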
index 43c2e0ac23..cfdaa1c5d9 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py
@@ -16,11 +16,14 @@ import logging import re +from twisted.internet import defer + logger = logging.getLogger(__name__) +@defer.inlineCallbacks def check_3pid_allowed(hs, medium, address): - """Checks whether a given format of 3PID is allowed to be used on this HS + """Checks whether a given 3PID is allowed to be used on this HS Args: hs (synapse.server.HomeServer): server @@ -28,9 +31,36 @@ def check_3pid_allowed(hs, medium, address): address (str): address within that medium (e.g. "wotan@matrix.org") msisdns need to first have been canonicalised Returns: - bool: whether the 3PID medium/address is allowed to be added to this HS + defered bool: whether the 3PID medium/address is allowed to be added to this HS """ + if hs.config.check_is_for_allowed_local_3pids: + data = yield hs.get_simple_http_client().get_json( + "https://%s%s" + % ( + hs.config.check_is_for_allowed_local_3pids, + "/_matrix/identity/api/v1/internal-info", + ), + {"medium": medium, "address": address}, + ) + + # Check for invalid response + if "hs" not in data and "shadow_hs" not in data: + defer.returnValue(False) + + # Check if this user is intended to register for this homeserver + if ( + data.get("hs") != hs.config.server_name + and data.get("shadow_hs") != hs.config.server_name + ): + defer.returnValue(False) + + if data.get("requires_invite", False) and not data.get("invited", False): + # Requires an invite but hasn't been invited + defer.returnValue(False) + + defer.returnValue(True) + if hs.config.allowed_local_3pids: for constraint in hs.config.allowed_local_3pids: logger.debug( @@ -43,11 +73,11 @@ def check_3pid_allowed(hs, medium, address): if medium == constraint["medium"] and re.match( constraint["pattern"], address ): - return True + defer.returnValue(True) else: - return True + defer.returnValue(True) - return False + defer.returnValue(False) def canonicalise_email(address: str) -> str: diff --git a/sytest-blacklist b/sytest-blacklist
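With this change check_3pid_allowed can defer the decision to an identity server: it fetches /internal-info from the host configured as check_is_for_allowed_local_3pids, then applies the "is this address meant for this homeserver, and does it require an invite" checks shown above. A pure-function sketch of that decision applied to an already-fetched response dict; the field names come from the code above, the full endpoint schema is not shown here:

    def is_allowed(data: dict, server_name: str) -> bool:
        # Invalid response: neither "hs" nor "shadow_hs" present.
        if "hs" not in data and "shadow_hs" not in data:
            return False
        # The address must be intended for this homeserver (or its shadow).
        if data.get("hs") != server_name and data.get("shadow_hs") != server_name:
            return False
        # Some addresses require an invite before they may register.
        if data.get("requires_invite", False) and not data.get("invited", False):
            return False
        return True

    assert is_allowed({"hs": "example.com"}, "example.com")
    assert not is_allowed({"hs": "other.org"}, "example.com")
    assert not is_allowed({"hs": "example.com", "requires_invite": True}, "example.com")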
index 79b2d4402a..a0e72eb155 100644 --- a/sytest-blacklist +++ b/sytest-blacklist
@@ -36,3 +36,37 @@ Inbound federation of state requires event_id as a mandatory paramater # Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands Can upload self-signing keys + +# Blacklisted until MSC2753 is implemented +Local users can peek into world_readable rooms by room ID +We can't peek into rooms with shared history_visibility +We can't peek into rooms with invited history_visibility +We can't peek into rooms with joined history_visibility +Local users can peek by room alias +Peeked rooms only turn up in the sync for the device who peeked them + +# Blacklisted due to https://github.com/matrix-org/synapse-dinsic/issues/43 +Inviting an AS-hosted user asks the AS server +Accesing an AS-hosted room alias asks the AS server +Events in rooms with AS-hosted room aliases are sent to AS server + +# flaky test +If remote user leaves room we no longer receive device updates + +# flaky test +Can re-join room if re-invited + +# flaky test +Forgotten room messages cannot be paginated + +# flaky test +Local device key changes get to remote servers + +# flaky test +Old leaves are present in gapped incremental syncs + +# flaky test on workers +Old members are included in gappy incr LL sync if they start speaking + +# flaky test on workers +Presence changes to UNAVAILABLE are reported to remote room members diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 3d880c499d..1471cc1a28 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py
@@ -77,11 +77,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock( - side_effect=lambda *args, **kwargs: make_awaitable({"v1": 9999}) - ) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) handler.federation_handler.do_invite_join = Mock( - side_effect=lambda *args, **kwargs: make_awaitable(("", 1)) + return_value=make_awaitable(("", 1)) ) d = handler._remote_join( @@ -110,11 +108,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock( - side_effect=lambda *args, **kwargs: make_awaitable({"v1": 9999}) - ) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) handler.federation_handler.do_invite_join = Mock( - side_effect=lambda *args, **kwargs: make_awaitable(("", 1)) + return_value=make_awaitable(("", 1)) ) d = handler._remote_join( @@ -150,11 +146,9 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock( - side_effect=lambda *args, **kwargs: make_awaitable(None) - ) + fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) handler.federation_handler.do_invite_join = Mock( - side_effect=lambda *args, **kwargs: make_awaitable(("", 1)) + return_value=make_awaitable(("", 1)) ) # Artificially raise the complexity @@ -208,11 +202,9 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock( - side_effect=lambda *args, **kwargs: make_awaitable({"v1": 9999}) - ) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) handler.federation_handler.do_invite_join = Mock( - side_effect=lambda *args, **kwargs: make_awaitable(("", 1)) + return_value=make_awaitable(("", 1)) ) d = handler._remote_join( @@ -240,11 +232,9 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): fed_transport = self.hs.get_federation_transport_client() # Mock out some things, because we don't want to test the whole join - fed_transport.client.get_json = Mock( - side_effect=lambda *args, **kwargs: make_awaitable({"v1": 9999}) - ) + fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) handler.federation_handler.do_invite_join = Mock( - side_effect=lambda *args, **kwargs: make_awaitable(("", 1)) + return_value=make_awaitable(("", 1)) ) d = handler._remote_join( diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py new file mode 100644
index 0000000000..6cdcc378f0 --- /dev/null +++ b/tests/federation/test_federation_catch_up.py
@@ -0,0 +1,158 @@ +from mock import Mock + +from synapse.rest import admin +from synapse.rest.client.v1 import login, room + +from tests.test_utils import event_injection, make_awaitable +from tests.unittest import FederatingHomeserverTestCase, override_config + + +class FederationCatchUpTestCases(FederatingHomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + federation_transport_client=Mock(spec=["send_transaction"]), + ) + + def prepare(self, reactor, clock, hs): + # stub out get_current_hosts_in_room + state_handler = hs.get_state_handler() + + # This mock is crucial for destination_rooms to be populated. + state_handler.get_current_hosts_in_room = Mock( + return_value=make_awaitable(["test", "host2"]) + ) + + # whenever send_transaction is called, record the pdu data + self.pdus = [] + self.failed_pdus = [] + self.is_online = True + self.hs.get_federation_transport_client().send_transaction.side_effect = ( + self.record_transaction + ) + + async def record_transaction(self, txn, json_cb): + if self.is_online: + data = json_cb() + self.pdus.extend(data["pdus"]) + return {} + else: + data = json_cb() + self.failed_pdus.extend(data["pdus"]) + raise IOError("Failed to connect because this is a test!") + + def get_destination_room(self, room: str, destination: str = "host2") -> dict: + """ + Gets the destination_rooms entry for a (destination, room_id) pair. + + Args: + room: room ID + destination: what destination, default is "host2" + + Returns: + Dictionary of { event_id: str, stream_ordering: int } + """ + event_id, stream_ordering = self.get_success( + self.hs.get_datastore().db_pool.execute( + "test:get_destination_rooms", + None, + """ + SELECT event_id, stream_ordering + FROM destination_rooms dr + JOIN events USING (stream_ordering) + WHERE dr.destination = ? AND dr.room_id = ? + """, + destination, + room, + ) + )[0] + return {"event_id": event_id, "stream_ordering": stream_ordering} + + @override_config({"send_federation": True}) + def test_catch_up_destination_rooms_tracking(self): + """ + Tests that we populate the `destination_rooms` table as needed. + """ + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room = self.helper.create_room_as("u1", tok=u1_token) + + self.get_success( + event_injection.inject_member_event(self.hs, room, "@user:host2", "join") + ) + + event_id_1 = self.helper.send(room, "wombats!", tok=u1_token)["event_id"] + + row_1 = self.get_destination_room(room) + + event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"] + + row_2 = self.get_destination_room(room) + + # check: events correctly registered in order + self.assertEqual(row_1["event_id"], event_id_1) + self.assertEqual(row_2["event_id"], event_id_2) + self.assertEqual(row_1["stream_ordering"], row_2["stream_ordering"] - 1) + + @override_config({"send_federation": True}) + def test_catch_up_last_successful_stream_ordering_tracking(self): + """ + Tests that we populate the `destination_rooms` table as needed. 
+ """ + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room = self.helper.create_room_as("u1", tok=u1_token) + + # take the remote offline + self.is_online = False + + self.get_success( + event_injection.inject_member_event(self.hs, room, "@user:host2", "join") + ) + + self.helper.send(room, "wombats!", tok=u1_token) + self.pump() + + lsso_1 = self.get_success( + self.hs.get_datastore().get_destination_last_successful_stream_ordering( + "host2" + ) + ) + + self.assertIsNone( + lsso_1, + "There should be no last successful stream ordering for an always-offline destination", + ) + + # bring the remote online + self.is_online = True + + event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"] + + lsso_2 = self.get_success( + self.hs.get_datastore().get_destination_last_successful_stream_ordering( + "host2" + ) + ) + row_2 = self.get_destination_room(room) + + self.assertEqual( + self.pdus[0]["content"]["body"], + "rabbits!", + "Test fault: didn't receive the right PDU", + ) + self.assertEqual( + row_2["event_id"], + event_id_2, + "Test fault: destination_rooms not updated correctly", + ) + self.assertEqual( + lsso_2, + row_2["stream_ordering"], + "Send succeeded but not marked as last_successful_stream_ordering", + ) diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index 5f512ff8bf..917762e6b6 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py
@@ -34,7 +34,7 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): def make_homeserver(self, reactor, clock): mock_state_handler = Mock(spec=["get_current_hosts_in_room"]) # Ensure a new Awaitable is created for each call. - mock_state_handler.get_current_hosts_in_room.side_effect = lambda room_Id: make_awaitable( + mock_state_handler.get_current_hosts_in_room.return_value = make_awaitable( ["test", "host2"] ) return self.setup_test_homeserver( diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
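Much of the test churn in this diff is the same mechanical change: mocks that previously used a side_effect lambda to mint a fresh awaitable on every call now pass return_value=make_awaitable(...). That presumably relies on make_awaitable returning something that can safely be awaited more than once (for example a completed Future). A minimal sketch of the pattern, using a hypothetical re-awaitable helper rather than the real tests.test_utils one:

    import asyncio
    from unittest.mock import Mock

    class ReAwaitable:
        """Hypothetical stand-in for make_awaitable: the same instance can be
        awaited any number of times and always yields the same value."""

        def __init__(self, value):
            self._value = value

        def __await__(self):
            return self._value
            yield  # unreachable; only here to make this a generator function

    get_json = Mock(return_value=ReAwaitable({"v1": 9999}))

    async def caller():
        # Both awaits reuse the same object; a plain coroutine would raise
        # "cannot reuse already awaited coroutine" on the second call.
        assert await get_json("a") == {"v1": 9999}
        assert await get_json("b") == {"v1": 9999}

    asyncio.run(caller())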
index c7efd3822d..97877c2e42 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py
@@ -143,7 +143,7 @@ class AuthTestCase(unittest.TestCase): def test_mau_limits_exceeded_large(self): self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.large_number_of_users) + return_value=make_awaitable(self.large_number_of_users) ) with self.assertRaises(ResourceLimitError): @@ -154,7 +154,7 @@ class AuthTestCase(unittest.TestCase): ) self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.large_number_of_users) + return_value=make_awaitable(self.large_number_of_users) ) with self.assertRaises(ResourceLimitError): yield defer.ensureDeferred( @@ -169,7 +169,7 @@ class AuthTestCase(unittest.TestCase): # If not in monthly active cohort self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.auth_blocking._max_mau_value) + return_value=make_awaitable(self.auth_blocking._max_mau_value) ) with self.assertRaises(ResourceLimitError): yield defer.ensureDeferred( @@ -179,7 +179,7 @@ class AuthTestCase(unittest.TestCase): ) self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.auth_blocking._max_mau_value) + return_value=make_awaitable(self.auth_blocking._max_mau_value) ) with self.assertRaises(ResourceLimitError): yield defer.ensureDeferred( @@ -189,10 +189,10 @@ class AuthTestCase(unittest.TestCase): ) # If in monthly active cohort self.hs.get_datastore().user_last_seen_monthly_active = Mock( - side_effect=lambda user_id: make_awaitable(self.hs.get_clock().time_msec()) + return_value=make_awaitable(self.hs.get_clock().time_msec()) ) self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.auth_blocking._max_mau_value) + return_value=make_awaitable(self.auth_blocking._max_mau_value) ) yield defer.ensureDeferred( self.auth_handler.get_access_token_for_user_id( @@ -200,10 +200,10 @@ class AuthTestCase(unittest.TestCase): ) ) self.hs.get_datastore().user_last_seen_monthly_active = Mock( - side_effect=lambda user_id: make_awaitable(self.hs.get_clock().time_msec()) + return_value=make_awaitable(self.hs.get_clock().time_msec()) ) self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.auth_blocking._max_mau_value) + return_value=make_awaitable(self.auth_blocking._max_mau_value) ) yield defer.ensureDeferred( self.auth_handler.validate_short_term_login_token_and_get_user_id( @@ -216,7 +216,7 @@ class AuthTestCase(unittest.TestCase): self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.small_number_of_users) + return_value=make_awaitable(self.small_number_of_users) ) # Ensure does not raise exception yield defer.ensureDeferred( @@ -226,7 +226,7 @@ class AuthTestCase(unittest.TestCase): ) self.hs.get_datastore().get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.small_number_of_users) + return_value=make_awaitable(self.small_number_of_users) ) yield defer.ensureDeferred( self.auth_handler.validate_short_term_login_token_and_get_user_id( diff --git a/tests/handlers/test_identity.py b/tests/handlers/test_identity.py new file mode 100644
index 0000000000..0ab0356109 --- /dev/null +++ b/tests/handlers/test_identity.py
@@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from twisted.internet import defer + +import synapse.rest.admin +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import account + +from tests import unittest + + +class ThreepidISRewrittenURLTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + account.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.address = "test@test" + self.is_server_name = "testis" + self.is_server_url = "https://testis" + self.rewritten_is_url = "https://int.testis" + + config = self.default_config() + config["trusted_third_party_id_servers"] = [self.is_server_name] + config["rewrite_identity_server_urls"] = { + self.is_server_url: self.rewritten_is_url + } + + mock_http_client = Mock(spec=["get_json", "post_json_get_json"]) + mock_http_client.get_json.side_effect = defer.succeed({}) + mock_http_client.post_json_get_json.return_value = defer.succeed( + {"address": self.address, "medium": "email"} + ) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + mock_blacklisting_http_client = Mock(spec=["get_json", "post_json_get_json"]) + mock_blacklisting_http_client.get_json.side_effect = defer.succeed({}) + mock_blacklisting_http_client.post_json_get_json.return_value = defer.succeed( + {"address": self.address, "medium": "email"} + ) + + # TODO: This class does not use a singleton to get it's http client + # This should be fixed for easier testing + # https://github.com/matrix-org/synapse-dinsic/issues/26 + self.hs.get_handlers().identity_handler.blacklisting_http_client = ( + mock_blacklisting_http_client + ) + + return self.hs + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("kermit", "monkey") + + def test_rewritten_id_server(self): + """ + Tests that, when validating a 3PID association while rewriting the IS's server + name: + * the bind request is done against the rewritten hostname + * the original, non-rewritten, server name is stored in the database + """ + handler = self.hs.get_handlers().identity_handler + post_json_get_json = handler.blacklisting_http_client.post_json_get_json + store = self.hs.get_datastore() + + creds = {"sid": "123", "client_secret": "some_secret"} + + # Make sure processing the mocked response goes through. + data = self.get_success( + handler.bind_threepid( + client_secret=creds["client_secret"], + sid=creds["sid"], + mxid=self.user_id, + id_server=self.is_server_name, + use_v2=False, + ) + ) + self.assertEqual(data.get("address"), self.address) + + # Check that the request was done against the rewritten server name. 
+ post_json_get_json.assert_called_once_with( + "%s/_matrix/identity/api/v1/3pid/bind" % (self.rewritten_is_url,), + { + "sid": creds["sid"], + "client_secret": creds["client_secret"], + "mxid": self.user_id, + }, + headers={}, + ) + + # Check that the original server name is saved in the database instead of the + # rewritten one. + id_servers = self.get_success( + store.get_id_servers_user_bound(self.user_id, "email", self.address) + ) + self.assertEqual(id_servers, [self.is_server_name]) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 8e95e53d9e..1cef10feff 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py
@@ -72,7 +72,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_my_name(self): yield defer.ensureDeferred( - self.store.set_profile_displayname(self.frank.localpart, "Frank") + self.store.set_profile_displayname(self.frank.localpart, "Frank", 1) ) displayname = yield defer.ensureDeferred( @@ -120,7 +120,7 @@ class ProfileTestCase(unittest.TestCase): # Setting displayname for the first time is allowed yield defer.ensureDeferred( - self.store.set_profile_displayname(self.frank.localpart, "Frank") + self.store.set_profile_displayname(self.frank.localpart, "Frank", 1) ) self.assertEquals( @@ -173,7 +173,7 @@ class ProfileTestCase(unittest.TestCase): def test_incoming_fed_query(self): yield defer.ensureDeferred(self.store.create_profile("caroline")) yield defer.ensureDeferred( - self.store.set_profile_displayname("caroline", "Caroline") + self.store.set_profile_displayname("caroline", "Caroline", 1) ) response = yield defer.ensureDeferred( @@ -188,7 +188,7 @@ class ProfileTestCase(unittest.TestCase): def test_get_my_avatar(self): yield defer.ensureDeferred( self.store.set_profile_avatar_url( - self.frank.localpart, "http://my.server/me.png" + self.frank.localpart, "http://my.server/me.png", 1 ) ) avatar_url = yield defer.ensureDeferred(self.handler.get_avatar_url(self.frank)) @@ -239,7 +239,7 @@ class ProfileTestCase(unittest.TestCase): # Setting displayname for the first time is allowed yield defer.ensureDeferred( self.store.set_profile_avatar_url( - self.frank.localpart, "http://my.server/me.png" + self.frank.localpart, "http://my.server/me.png", 1 ) ) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index eddf5e2498..312c03c83d 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py
@@ -19,9 +19,15 @@ from synapse.api.auth import Auth from synapse.api.constants import UserTypes from synapse.api.errors import Codes, ResourceLimitError, SynapseError from synapse.handlers.register import RegistrationHandler +from synapse.http.site import SynapseRequest +from synapse.rest.client.v2_alpha.register import ( + _map_email_to_displayname, + register_servlets, +) from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, UserID, create_requester +from tests.server import FakeChannel from tests.test_utils import make_awaitable from tests.unittest import override_config from tests.utils import mock_getRawHeaders @@ -37,6 +43,10 @@ class RegistrationHandlers: class RegistrationTestCase(unittest.HomeserverTestCase): """ Tests the RegistrationHandler. """ + servlets = [ + register_servlets, + ] + def make_homeserver(self, reactor, clock): hs_config = self.default_config() @@ -100,7 +110,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_get_or_create_user_mau_not_blocked(self): self.hs.config.limit_usage_by_mau = True self.store.count_monthly_users = Mock( - side_effect=lambda: make_awaitable(self.hs.config.max_mau_value - 1) + return_value=make_awaitable(self.hs.config.max_mau_value - 1) ) # Ensure does not throw exception self.get_success(self.get_or_create_user(self.requester, "c", "User")) @@ -108,7 +118,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_get_or_create_user_mau_blocked(self): self.hs.config.limit_usage_by_mau = True self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.lots_of_users) + return_value=make_awaitable(self.lots_of_users) ) self.get_failure( self.get_or_create_user(self.requester, "b", "display_name"), @@ -116,7 +126,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): ) self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.max_mau_value) ) self.get_failure( self.get_or_create_user(self.requester, "b", "display_name"), @@ -126,14 +136,14 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_register_mau_blocked(self): self.hs.config.limit_usage_by_mau = True self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.lots_of_users) + return_value=make_awaitable(self.lots_of_users) ) self.get_failure( self.handler.register_user(localpart="local_part"), ResourceLimitError ) self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.max_mau_value) ) self.get_failure( self.handler.register_user(localpart="local_part"), ResourceLimitError @@ -523,6 +533,104 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self.assertTrue(requester.shadow_banned) + def test_email_to_displayname_mapping(self): + """Test that custom emails are mapped to new user displaynames correctly""" + self._check_mapping( + "jack-phillips.rivers@big-org.com", "Jack-Phillips Rivers [Big-Org]" + ) + + self._check_mapping("bob.jones@matrix.org", "Bob Jones [Tchap Admin]") + + self._check_mapping("bob-jones.blabla@gouv.fr", "Bob-Jones Blabla [Gouv]") + + # Multibyte unicode characters + self._check_mapping( + "j\u030a\u0065an-poppy.seed@example.com", + "J\u030a\u0065an-Poppy Seed [Example]", + ) + + def _check_mapping(self, i, expected): + result = _map_email_to_displayname(i) + self.assertEqual(result, expected) + + 
@override_config( + { + "bind_new_user_emails_to_sydent": "https://is.example.com", + "registrations_require_3pid": ["email"], + "account_threepid_delegates": {}, + "email": { + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "notif_from": "test@example.com", + }, + "public_baseurl": "http://localhost", + } + ) + def test_user_email_bound_via_sydent_internal_api(self): + """Tests that emails are bound after registration if this option is set""" + # Register user with an email address + email = "alice@example.com" + + # Mock Synapse's threepid validator + get_threepid_validation_session = Mock( + return_value=make_awaitable( + {"medium": "email", "address": email, "validated_at": 0} + ) + ) + self.store.get_threepid_validation_session = get_threepid_validation_session + delete_threepid_session = Mock(return_value=make_awaitable(None)) + self.store.delete_threepid_session = delete_threepid_session + + # Mock Synapse's http json post method to check for the internal bind call + post_json_get_json = Mock(return_value=make_awaitable(None)) + self.hs.get_simple_http_client().post_json_get_json = post_json_get_json + + # Retrieve a UIA session ID + channel = self.uia_register( + 401, {"username": "alice", "password": "nobodywillguessthis"} + ) + session_id = channel.json_body["session"] + + # Register our email address using the fake validation session above + channel = self.uia_register( + 200, + { + "username": "alice", + "password": "nobodywillguessthis", + "auth": { + "session": session_id, + "type": "m.login.email.identity", + "threepid_creds": {"sid": "blabla", "client_secret": "blablabla"}, + }, + }, + ) + self.assertEqual(channel.json_body["user_id"], "@alice:test") + + # Check that a bind attempt was made to our fake identity server + post_json_get_json.assert_called_with( + "https://is.example.com/_matrix/identity/internal/bind", + {"address": "alice@example.com", "medium": "email", "mxid": "@alice:test"}, + ) + + # Check that we stored a mapping of this bind + bound_threepids = self.get_success( + self.store.user_get_bound_threepids("@alice:test") + ) + self.assertListEqual(bound_threepids, [{"medium": "email", "address": email}]) + + def uia_register(self, expected_response: int, body: dict) -> FakeChannel: + """Make a register request.""" + request, channel = self.make_request( + "POST", "register", body + ) # type: SynapseRequest, FakeChannel + self.render(request) + + self.assertEqual(request.code, expected_response) + return channel + async def get_or_create_user( self, requester, localpart, displayname, password_hash=None ): diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
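The _map_email_to_displayname expectations above ("jack-phillips.rivers@big-org.com" becoming "Jack-Phillips Rivers [Big-Org]") suggest the shape of the mapping: title-case the dotted localpart and bracket the first label of the domain. The following is an illustrative approximation only; the real helper in synapse/rest/client/v2_alpha/register.py also carries special cases (for example matrix.org mapping to "Tchap Admin") that are not reproduced here:

    def map_email_to_displayname(address: str) -> str:
        # Approximation of the mapping exercised by the test above.
        localpart, _, domain = address.partition("@")
        # "jack-phillips.rivers" -> "Jack-Phillips Rivers"
        name = " ".join(part.title() for part in localpart.split("."))
        # "big-org.com" -> "Big-Org"
        org = domain.split(".")[0].title()
        return "%s [%s]" % (name, org)

    assert map_email_to_displayname("jack-phillips.rivers@big-org.com") == "Jack-Phillips Rivers [Big-Org]"
    assert map_email_to_displayname("bob-jones.blabla@gouv.fr") == "Bob-Jones Blabla [Gouv]"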
index a609f148c0..0229f58315 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py
@@ -21,8 +21,14 @@ from tests import unittest # The expected number of state events in a fresh public room. EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM = 5 + # The expected number of state events in a fresh private room. -EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 6 +# +# Note: we increase this by 2 on the dinsic branch as we send +# a "im.vector.room.access_rules" state event into new private rooms, +# and an encryption state event as all private rooms are encrypted +# by default +EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 7 class StatsRoomTests(unittest.HomeserverTestCase): @@ -54,7 +60,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): self.store.db_pool.simple_insert( "background_updates", { - "update_name": "populate_stats_process_rooms_2", + "update_name": "populate_stats_process_rooms", "progress_json": "{}", "depends_on": "populate_stats_prepare", }, @@ -66,7 +72,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): { "update_name": "populate_stats_process_users", "progress_json": "{}", - "depends_on": "populate_stats_process_rooms_2", + "depends_on": "populate_stats_process_rooms", }, ) ) @@ -219,10 +225,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): self.get_success( self.store.db_pool.simple_insert( "background_updates", - { - "update_name": "populate_stats_process_rooms_2", - "progress_json": "{}", - }, + {"update_name": "populate_stats_process_rooms", "progress_json": "{}"}, ) ) self.get_success( @@ -231,7 +234,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): { "update_name": "populate_stats_cleanup", "progress_json": "{}", - "depends_on": "populate_stats_process_rooms_2", + "depends_on": "populate_stats_process_rooms", }, ) ) @@ -728,7 +731,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): self.store.db_pool.simple_insert( "background_updates", { - "update_name": "populate_stats_process_rooms_2", + "update_name": "populate_stats_process_rooms", "progress_json": "{}", "depends_on": "populate_stats_prepare", }, @@ -740,7 +743,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): { "update_name": "populate_stats_process_users", "progress_json": "{}", - "depends_on": "populate_stats_process_rooms_2", + "depends_on": "populate_stats_process_rooms", }, ) ) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 7bf15c4ba9..f306a09bfa 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py
@@ -80,6 +80,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): "get_user_directory_stream_pos", "get_current_state_deltas", "get_device_updates_by_remote", + "get_room_max_stream_ordering", ] ) @@ -116,7 +117,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): retry_timings_res ) - self.datastore.get_device_updates_by_remote.side_effect = lambda destination, from_stream_id, limit: make_awaitable( + self.datastore.get_device_updates_by_remote.return_value = make_awaitable( (0, []) ) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 87be94111f..48f750d357 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py
@@ -19,7 +19,7 @@ from twisted.internet import defer import synapse.rest.admin from synapse.api.constants import EventTypes, RoomEncryptionAlgorithms, UserTypes from synapse.rest.client.v1 import login, room -from synapse.rest.client.v2_alpha import user_directory +from synapse.rest.client.v2_alpha import account, account_validity, user_directory from synapse.storage.roommember import ProfileInfo from tests import unittest @@ -549,3 +549,136 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) + + +class UserInfoTestCase(unittest.FederatingHomeserverTestCase): + servlets = [ + login.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + account_validity.register_servlets, + synapse.rest.client.v2_alpha.user_directory.register_servlets, + account.register_servlets, + ] + + def default_config(self): + config = super().default_config() + + # Set accounts to expire after a week + config["account_validity"] = { + "enabled": True, + "period": 604800000, # Time in ms for 1 week + } + return config + + def prepare(self, reactor, clock, hs): + super(UserInfoTestCase, self).prepare(reactor, clock, hs) + self.store = hs.get_datastore() + self.handler = hs.get_user_directory_handler() + + def test_user_info(self): + """Test /users/info for local users from the Client-Server API""" + user_one, user_two, user_three, user_three_token = self.setup_test_users() + + # Request info about each user from user_three + request, channel = self.make_request( + "POST", + path="/_matrix/client/unstable/users/info", + content={"user_ids": [user_one, user_two, user_three]}, + access_token=user_three_token, + shorthand=False, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + # Check the state of user_one matches + user_one_info = channel.json_body[user_one] + self.assertTrue(user_one_info["deactivated"]) + self.assertFalse(user_one_info["expired"]) + + # Check the state of user_two matches + user_two_info = channel.json_body[user_two] + self.assertFalse(user_two_info["deactivated"]) + self.assertTrue(user_two_info["expired"]) + + # Check the state of user_three matches + user_three_info = channel.json_body[user_three] + self.assertFalse(user_three_info["deactivated"]) + self.assertFalse(user_three_info["expired"]) + + def test_user_info_federation(self): + """Test that /users/info can be called from the Federation API, and + and that we can query remote users from the Client-Server API + """ + user_one, user_two, user_three, user_three_token = self.setup_test_users() + + # Request information about our local users from the perspective of a remote server + request, channel = self.make_request( + "POST", + path="/_matrix/federation/unstable/users/info", + content={"user_ids": [user_one, user_two, user_three]}, + ) + self.render(request) + self.assertEquals(200, channel.code) + + # Check the state of user_one matches + user_one_info = channel.json_body[user_one] + self.assertTrue(user_one_info["deactivated"]) + self.assertFalse(user_one_info["expired"]) + + # Check the state of user_two matches + user_two_info = channel.json_body[user_two] + self.assertFalse(user_two_info["deactivated"]) + self.assertTrue(user_two_info["expired"]) + + # Check the state of user_three matches + user_three_info = channel.json_body[user_three] + self.assertFalse(user_three_info["deactivated"]) + 
self.assertFalse(user_three_info["expired"]) + + def setup_test_users(self): + """Create an admin user and three test users, each with a different state""" + + # Create an admin user to expire other users with + self.register_user("admin", "adminpassword", admin=True) + admin_token = self.login("admin", "adminpassword") + + # Create three users + user_one = self.register_user("alice", "pass") + user_one_token = self.login("alice", "pass") + user_two = self.register_user("bob", "pass") + user_three = self.register_user("carl", "pass") + user_three_token = self.login("carl", "pass") + + # Deactivate user_one + self.deactivate(user_one, user_one_token) + + # Expire user_two + self.expire(user_two, admin_token) + + # Do nothing to user_three + + return user_one, user_two, user_three, user_three_token + + def expire(self, user_id_to_expire, admin_tok): + url = "/_matrix/client/unstable/admin/account_validity/validity" + request_data = { + "user_id": user_id_to_expire, + "expiration_ts": 0, + "enable_renewal_emails": False, + } + request, channel = self.make_request( + "POST", url, request_data, access_token=admin_tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + def deactivate(self, user_id, tok): + request_data = { + "auth": {"type": "m.login.password", "user": user_id, "password": "pass"}, + "erase": False, + } + request, channel = self.make_request( + "POST", "account/deactivate", request_data, access_token=tok + ) + self.render(request) + self.assertEqual(request.code, 200) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
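The /users/info tests above pin down the request and response shape of the new endpoint on both the client-server (/_matrix/client/unstable/users/info) and federation (/_matrix/federation/unstable/users/info) paths: a JSON object keyed by user ID with per-user deactivation and expiry flags. Roughly, with illustrative values:

    # Request body (both the client-server and federation variants take this shape):
    request_body = {"user_ids": ["@alice:test", "@bob:test"]}

    # Response, as asserted by the tests above:
    response_body = {
        "@alice:test": {"deactivated": True, "expired": False},
        "@bob:test": {"deactivated": False, "expired": True},
    }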
index 8b5ad4574f..c3f7a28dcc 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -101,7 +101,7 @@ class MatrixFederationAgentTests(unittest.TestCase): self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=self.tls_factory, + tls_client_options_factory=FederationPolicyForHTTPS(config), user_agent="test-agent", # Note that this is unused since _well_known_resolver is provided. _srv_resolver=self.mock_resolver, _well_known_resolver=self.well_known_resolver, diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 04de0b9dbe..ccbb82f6a3 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py
@@ -12,16 +12,28 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock +from synapse.events import EventBase from synapse.module_api import ModuleApi +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.types import create_requester from tests.unittest import HomeserverTestCase class ModuleApiTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) + self.event_creation_handler = homeserver.get_event_creation_handler() def test_can_register_user(self): """Tests that an external module can register a user""" @@ -52,3 +64,137 @@ class ModuleApiTestCase(HomeserverTestCase): # Check that the displayname was assigned displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") + + def test_sending_events_into_room(self): + """Tests that a module can send events into a room""" + # Mock out create_and_send_nonmember_event to check whether events are being sent + self.event_creation_handler.create_and_send_nonmember_event = Mock( + spec=[], + side_effect=self.event_creation_handler.create_and_send_nonmember_event, + ) + + # Create a user and room to play with + user_id = self.register_user("summer", "monkey") + tok = self.login("summer", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # Create and send a non-state event + content = {"body": "I am a puppet", "msgtype": "m.text"} + event_dict = { + "room_id": room_id, + "type": "m.room.message", + "content": content, + "sender": user_id, + } + event = self.get_success( + self.module_api.create_and_send_event_into_room(event_dict) + ) # type: EventBase + self.assertEqual(event.sender, user_id) + self.assertEqual(event.type, "m.room.message") + self.assertEqual(event.room_id, room_id) + self.assertFalse(hasattr(event, "state_key")) + self.assertDictEqual(event.content, content) + + # Check that the event was sent + self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( + create_requester(user_id), event_dict, ratelimit=False, + ) + + # Create and send a state event + content = { + "events_default": 0, + "users": {user_id: 100}, + "state_default": 50, + "users_default": 0, + "events": {"test.event.type": 25}, + } + event_dict = { + "room_id": room_id, + "type": "m.room.power_levels", + "content": content, + "sender": user_id, + "state_key": "", + } + event = self.get_success( + self.module_api.create_and_send_event_into_room(event_dict) + ) # type: EventBase + self.assertEqual(event.sender, user_id) + self.assertEqual(event.type, "m.room.power_levels") + self.assertEqual(event.room_id, room_id) + self.assertEqual(event.state_key, "") + self.assertDictEqual(event.content, content) + + # Check that the event was sent + self.event_creation_handler.create_and_send_nonmember_event.assert_called_with( + create_requester(user_id), + { + "type": "m.room.power_levels", + "content": content, + "room_id": room_id, + "sender": user_id, + "state_key": "", + }, + ratelimit=False, + ) + + # Check that we can't send membership events + content = { + "membership": "leave", + } + event_dict = { + "room_id": room_id, + "type": "m.room.member", + 
"content": content, + "sender": user_id, + "state_key": user_id, + } + self.get_failure( + self.module_api.create_and_send_event_into_room(event_dict), Exception + ) + + def test_public_rooms(self): + """Tests that a room can be added and removed from the public rooms list, + as well as have its public rooms directory state queried. + """ + # Create a user and room to play with + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # The room should not currently be in the public rooms directory + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) + + # Let's try adding it to the public rooms directory + self.get_success( + self.module_api.public_room_list_manager.add_room_to_public_room_list( + room_id + ) + ) + + # And checking whether it's in there... + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertTrue(is_in_public_rooms) + + # Let's remove it again + self.get_success( + self.module_api.public_room_list_manager.remove_room_from_public_room_list( + room_id + ) + ) + + # Should be gone + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index b567868b02..2f56cacc7a 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py
@@ -346,8 +346,8 @@ class HTTPPusherTests(HomeserverTestCase): self.assertEqual(len(self.push_attempts), 2) self.assertEqual(self.push_attempts[1][1], "example.com") - # check that this is low-priority - self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") + # check that this is high-priority + self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "high") def test_sends_high_priority_for_mention(self): """ @@ -418,8 +418,8 @@ class HTTPPusherTests(HomeserverTestCase): self.assertEqual(len(self.push_attempts), 2) self.assertEqual(self.push_attempts[1][1], "example.com") - # check that this is low-priority - self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") + # check that this is high-priority + self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "high") def test_sends_high_priority_for_atroom(self): """ @@ -497,5 +497,5 @@ class HTTPPusherTests(HomeserverTestCase): self.assertEqual(len(self.push_attempts), 2) self.assertEqual(self.push_attempts[1][1], "example.com") - # check that this is low-priority - self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") + # check that this is high-priority + self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "high") diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 8b4982ecb1..1d7edee5ba 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py
@@ -45,7 +45,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): new event. """ mock_client = Mock(spec=["put_json"]) - mock_client.put_json.side_effect = lambda *_, **__: make_awaitable({}) + mock_client.put_json.return_value = make_awaitable({}) self.make_worker_hs( "synapse.app.federation_sender", @@ -73,7 +73,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): new events. """ mock_client1 = Mock(spec=["put_json"]) - mock_client1.put_json.side_effect = lambda *_, **__: make_awaitable({}) + mock_client1.put_json.return_value = make_awaitable({}) self.make_worker_hs( "synapse.app.federation_sender", { @@ -85,7 +85,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): ) mock_client2 = Mock(spec=["put_json"]) - mock_client2.put_json.side_effect = lambda *_, **__: make_awaitable({}) + mock_client2.put_json.return_value = make_awaitable({}) self.make_worker_hs( "synapse.app.federation_sender", { @@ -136,7 +136,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): new typing EDUs. """ mock_client1 = Mock(spec=["put_json"]) - mock_client1.put_json.side_effect = lambda *_, **__: make_awaitable({}) + mock_client1.put_json.return_value = make_awaitable({}) self.make_worker_hs( "synapse.app.federation_sender", { @@ -148,7 +148,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): ) mock_client2 = Mock(spec=["put_json"]) - mock_client2.put_json.side_effect = lambda *_, **__: make_awaitable({}) + mock_client2.put_json.return_value = make_awaitable({}) self.make_worker_hs( "synapse.app.federation_sender", { diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 160c630235..b8b7758d24 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py
@@ -337,7 +337,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # Set monthly active users to the limit store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.max_mau_value) ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit @@ -591,7 +591,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): # Set monthly active users to the limit self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.max_mau_value) ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit @@ -631,7 +631,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): # Set monthly active users to the limit self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(self.hs.config.max_mau_value) + return_value=make_awaitable(self.hs.config.max_mau_value) ) # Check that the blocking of monthly active users is working as expected # The registration of a new user fails due to the limit diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index c973521907..4224b0a92e 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py
@@ -15,15 +15,22 @@ import json +from mock import Mock + +from twisted.internet import defer + import synapse.rest.admin from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import account from tests import unittest -class IdentityTestCase(unittest.HomeserverTestCase): +class IdentityDisabledTestCase(unittest.HomeserverTestCase): + """Tests that 3PID lookup attempts fail when the HS's config disallows them.""" servlets = [ + account.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, @@ -32,24 +39,111 @@ class IdentityTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + config["trusted_third_party_id_servers"] = ["testis"] config["enable_3pid_lookup"] = False self.hs = self.setup_test_homeserver(config=config) return self.hs + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + def test_3pid_invite_disabled(self): + request, channel = self.make_request( + b"POST", "/createRoom", b"{}", access_token=self.tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + room_id = channel.json_body["room_id"] + + params = { + "id_server": "testis", + "medium": "email", + "address": "test@example.com", + } + request_data = json.dumps(params) + request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") + request, channel = self.make_request( + b"POST", request_url, request_data, access_token=self.tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + def test_3pid_lookup_disabled(self): - self.hs.config.enable_3pid_lookup = False + url = ( + "/_matrix/client/unstable/account/3pid/lookup" + "?id_server=testis&medium=email&address=foo@bar.baz" + ) + request, channel = self.make_request("GET", url, access_token=self.tok) + self.render(request) + self.assertEqual(channel.result["code"], b"403", channel.result) + + def test_3pid_bulk_lookup_disabled(self): + url = "/_matrix/client/unstable/account/3pid/bulk_lookup" + data = { + "id_server": "testis", + "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]], + } + request_data = json.dumps(data) + request, channel = self.make_request( + "POST", url, request_data, access_token=self.tok + ) + self.render(request) + self.assertEqual(channel.result["code"], b"403", channel.result) + + +class IdentityEnabledTestCase(unittest.HomeserverTestCase): + """Tests that 3PID lookup attempts succeed when the HS's config allows them.""" + + servlets = [ + account.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] - self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["enable_3pid_lookup"] = True + config["trusted_third_party_id_servers"] = ["testis"] + + mock_http_client = Mock(spec=["get_json", "post_json_get_json"]) + mock_http_client.get_json.return_value = defer.succeed((200, "{}")) + mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}")) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + # TODO: This class does not use a singleton to get it's http client + # This should be fixed for easier testing + # 
https://github.com/matrix-org/synapse-dinsic/issues/26 + self.hs.get_handlers().identity_handler.http_client = mock_http_client + + return self.hs + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + def test_3pid_invite_enabled(self): request, channel = self.make_request( - b"POST", "/createRoom", b"{}", access_token=tok + b"POST", "/createRoom", b"{}", access_token=self.tok ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) room_id = channel.json_body["room_id"] + # Replace the blacklisting SimpleHttpClient with our mock + self.hs.get_room_member_handler().simple_http_client = Mock( + spec=["get_json", "post_json_get_json"] + ) + self.hs.get_room_member_handler().simple_http_client.get_json.return_value = defer.succeed( + (200, "{}") + ) + params = { "id_server": "testis", "medium": "email", @@ -58,7 +152,44 @@ class IdentityTestCase(unittest.HomeserverTestCase): request_data = json.dumps(params) request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") request, channel = self.make_request( - b"POST", request_url, request_data, access_token=tok + b"POST", request_url, request_data, access_token=self.tok ) self.render(request) - self.assertEquals(channel.result["code"], b"403", channel.result) + + get_json = self.hs.get_handlers().identity_handler.http_client.get_json + get_json.assert_called_once_with( + "https://testis/_matrix/identity/api/v1/lookup", + {"address": "test@example.com", "medium": "email"}, + ) + + def test_3pid_lookup_enabled(self): + url = ( + "/_matrix/client/unstable/account/3pid/lookup" + "?id_server=testis&medium=email&address=foo@bar.baz" + ) + request, channel = self.make_request("GET", url, access_token=self.tok) + self.render(request) + + get_json = self.hs.get_simple_http_client().get_json + get_json.assert_called_once_with( + "https://testis/_matrix/identity/api/v1/lookup", + {"address": "foo@bar.baz", "medium": "email"}, + ) + + def test_3pid_bulk_lookup_enabled(self): + url = "/_matrix/client/unstable/account/3pid/bulk_lookup" + data = { + "id_server": "testis", + "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]], + } + request_data = json.dumps(data) + request, channel = self.make_request( + "POST", url, request_data, access_token=self.tok + ) + self.render(request) + + post_json = self.hs.get_simple_http_client().post_json_get_json + post_json.assert_called_once_with( + "https://testis/_matrix/identity/api/v1/bulk_lookup", + {"threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]]}, + ) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index 7d3773ff78..47c0d5634c 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py
@@ -34,6 +34,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + config["default_room_version"] = "1" config["retention"] = { "enabled": True, "default_policy": { @@ -243,6 +244,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + config["default_room_version"] = "1" config["retention"] = { "enabled": True, } diff --git a/tests/rest/client/test_room_access_rules.py b/tests/rest/client/test_room_access_rules.py new file mode 100644
index 0000000000..de7856fba9 --- /dev/null +++ b/tests/rest/client/test_room_access_rules.py
@@ -0,0 +1,1066 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import random +import string +from typing import Optional + +from mock import Mock + +from twisted.internet import defer + +from synapse.api.constants import EventTypes, JoinRules, Membership, RoomCreationPreset +from synapse.rest import admin +from synapse.rest.client.v1 import directory, login, room +from synapse.third_party_rules.access_rules import ( + ACCESS_RULES_TYPE, + AccessRules, + RoomAccessRules, +) +from synapse.types import JsonDict, create_requester + +from tests import unittest + + +class RoomAccessTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + directory.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + config["third_party_event_rules"] = { + "module": "synapse.third_party_rules.access_rules.RoomAccessRules", + "config": { + "domains_forbidden_when_restricted": ["forbidden_domain"], + "id_server": "testis", + }, + } + config["trusted_third_party_id_servers"] = ["testis"] + + def send_invite(destination, room_id, event_id, pdu): + return defer.succeed(pdu) + + def get_json(uri, args={}, headers=None): + address_domain = args["address"].split("@")[1] + return defer.succeed({"hs": address_domain}) + + def post_json_get_json(uri, post_json, args={}, headers=None): + token = "".join(random.choice(string.ascii_letters) for _ in range(10)) + return defer.succeed( + { + "token": token, + "public_keys": [ + { + "public_key": "serverpublickey", + "key_validity_url": "https://testis/pubkey/isvalid", + }, + { + "public_key": "phemeralpublickey", + "key_validity_url": "https://testis/pubkey/ephemeral/isvalid", + }, + ], + "display_name": "f...@b...", + } + ) + + mock_federation_client = Mock(spec=["send_invite"]) + mock_federation_client.send_invite.side_effect = send_invite + + mock_http_client = Mock(spec=["get_json", "post_json_get_json"],) + # Mocking the response for /info on the IS API. + mock_http_client.get_json.side_effect = get_json + # Mocking the response for /store-invite on the IS API. 
+ mock_http_client.post_json_get_json.side_effect = post_json_get_json + self.hs = self.setup_test_homeserver( + config=config, + federation_client=mock_federation_client, + simple_http_client=mock_http_client, + ) + + # TODO: This class does not use a singleton to get its http client + # This should be fixed for easier testing + # https://github.com/matrix-org/synapse-dinsic/issues/26 + self.hs.get_handlers().identity_handler.blacklisting_http_client = ( + mock_http_client + ) + + self.third_party_event_rules = self.hs.get_third_party_event_rules() + + return self.hs + + def prepare(self, reactor, clock, homeserver): + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.restricted_room = self.create_room() + self.unrestricted_room = self.create_room(rule=AccessRules.UNRESTRICTED) + self.direct_rooms = [ + self.create_room(direct=True), + self.create_room(direct=True), + self.create_room(direct=True), + ] + + self.invitee_id = self.register_user("invitee", "test") + self.invitee_tok = self.login("invitee", "test") + + self.helper.invite( + room=self.direct_rooms[0], + src=self.user_id, + targ=self.invitee_id, + tok=self.tok, + ) + + def test_create_room_no_rule(self): + """Tests that creating a room with no rule will set the default.""" + room_id = self.create_room() + rule = self.current_rule_in_room(room_id) + + self.assertEqual(rule, AccessRules.RESTRICTED) + + def test_create_room_direct_no_rule(self): + """Tests that creating a direct room with no rule will set the default.""" + room_id = self.create_room(direct=True) + rule = self.current_rule_in_room(room_id) + + self.assertEqual(rule, AccessRules.DIRECT) + + def test_create_room_valid_rule(self): + """Tests that creating a room with a valid rule will set the right rule.""" + room_id = self.create_room(rule=AccessRules.UNRESTRICTED) + rule = self.current_rule_in_room(room_id) + + self.assertEqual(rule, AccessRules.UNRESTRICTED) + + def test_create_room_invalid_rule(self): + """Tests that creating a room with an invalid rule will fail.""" + self.create_room(rule=AccessRules.DIRECT, expected_code=400) + + def test_create_room_direct_invalid_rule(self): + """Tests that creating a direct room with an invalid rule will fail.
+ """ + self.create_room(direct=True, rule=AccessRules.RESTRICTED, expected_code=400) + + def test_create_room_default_power_level_rules(self): + """Tests that a room created with no power level overrides instead uses the dinum + defaults + """ + room_id = self.create_room(direct=True, rule=AccessRules.DIRECT) + power_levels = self.helper.get_state(room_id, "m.room.power_levels", self.tok) + + # Inviting another user should require PL50, even in private rooms + self.assertEqual(power_levels["invite"], 50) + # Sending arbitrary state events should require PL100 + self.assertEqual(power_levels["state_default"], 100) + + def test_create_room_fails_on_incorrect_power_level_rules(self): + """Tests that a room created with power levels lower than that required are rejected""" + modified_power_levels = RoomAccessRules._get_default_power_levels(self.user_id) + modified_power_levels["invite"] = 0 + modified_power_levels["state_default"] = 50 + + self.create_room( + direct=True, + rule=AccessRules.DIRECT, + initial_state=[ + {"type": "m.room.power_levels", "content": modified_power_levels} + ], + expected_code=400, + ) + + def test_existing_room_can_change_power_levels(self): + """Tests that a room created with default power levels can have their power levels + dropped after room creation + """ + # Creates a room with the default power levels + room_id = self.create_room( + direct=True, rule=AccessRules.DIRECT, expected_code=200, + ) + + # Attempt to drop invite and state_default power levels after the fact + room_power_levels = self.helper.get_state( + room_id, "m.room.power_levels", self.tok + ) + room_power_levels["invite"] = 0 + room_power_levels["state_default"] = 50 + self.helper.send_state( + room_id, "m.room.power_levels", room_power_levels, self.tok + ) + + def test_public_room(self): + """Tests that it's only possible to have a room listed in the public room list + if the access rule is restricted. + """ + # Creating a room with the public_chat preset should succeed and set the access + # rule to restricted. + preset_room_id = self.create_room(preset=RoomCreationPreset.PUBLIC_CHAT) + self.assertEqual( + self.current_rule_in_room(preset_room_id), AccessRules.RESTRICTED + ) + + # Creating a room with the public join rule in its initial state should succeed + # and set the access rule to restricted. + init_state_room_id = self.create_room( + initial_state=[ + { + "type": "m.room.join_rules", + "content": {"join_rule": JoinRules.PUBLIC}, + } + ] + ) + self.assertEqual( + self.current_rule_in_room(init_state_room_id), AccessRules.RESTRICTED + ) + + # List preset_room_id in the public room list + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/directory/list/room/%s" % (preset_room_id,), + {"visibility": "public"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + # List init_state_room_id in the public room list + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/directory/list/room/%s" % (init_state_room_id,), + {"visibility": "public"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + # Changing access rule to unrestricted should fail. + self.change_rule_in_room( + preset_room_id, AccessRules.UNRESTRICTED, expected_code=403 + ) + self.change_rule_in_room( + init_state_room_id, AccessRules.UNRESTRICTED, expected_code=403 + ) + + # Changing access rule to direct should fail. 
+ self.change_rule_in_room(preset_room_id, AccessRules.DIRECT, expected_code=403) + self.change_rule_in_room( + init_state_room_id, AccessRules.DIRECT, expected_code=403 + ) + + # Creating a new room with the public_chat preset and an access rule of direct + # should fail. + self.create_room( + preset=RoomCreationPreset.PUBLIC_CHAT, + rule=AccessRules.DIRECT, + expected_code=400, + ) + + # Changing join rule to public in a direct room should fail. + self.change_join_rule_in_room( + self.direct_rooms[0], JoinRules.PUBLIC, expected_code=403 + ) + + def test_restricted(self): + """Tests that in restricted mode we're unable to invite users from blacklisted + servers but can invite other users. + + Also tests that the room can be published to, and removed from, the public room + list. + """ + # We can't invite a user from a forbidden HS. + self.helper.invite( + room=self.restricted_room, + src=self.user_id, + targ="@test:forbidden_domain", + tok=self.tok, + expect_code=403, + ) + + # We can invite a user whose HS isn't forbidden. + self.helper.invite( + room=self.restricted_room, + src=self.user_id, + targ="@test:allowed_domain", + tok=self.tok, + expect_code=200, + ) + + # We can't send a 3PID invite to an address that is mapped to a forbidden HS. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.restricted_room, + expected_code=403, + ) + + # We can send a 3PID invite to an address that is mapped to an HS that's not + # forbidden. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.restricted_room, + expected_code=200, + ) + + # We are allowed to publish the room to the public room list + url = "/_matrix/client/r0/directory/list/room/%s" % self.restricted_room + data = {"visibility": "public"} + + request, channel = self.make_request("PUT", url, data, access_token=self.tok) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + # We are allowed to remove the room from the public room list + url = "/_matrix/client/r0/directory/list/room/%s" % self.restricted_room + data = {"visibility": "private"} + + request, channel = self.make_request("PUT", url, data, access_token=self.tok) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + def test_direct(self): + """Tests that, in direct mode, users other than the initial two can't be invited, + but the following scenario works: + * invited user joins the room + * invited user leaves the room + * room creator re-invites invited user + + Tests that a user from an HS that's in the list of forbidden domains (to use + in restricted mode) can be invited. + + Tests that the room cannot be published to the public room list. + """ + not_invited_user = "@not_invited:forbidden_domain" + + # We can't invite a new user to the room. + self.helper.invite( + room=self.direct_rooms[0], + src=self.user_id, + targ=not_invited_user, + tok=self.tok, + expect_code=403, + ) + + # The invited user can join the room. + self.helper.join( + room=self.direct_rooms[0], + user=self.invitee_id, + tok=self.invitee_tok, + expect_code=200, + ) + + # The invited user can leave the room. + self.helper.leave( + room=self.direct_rooms[0], + user=self.invitee_id, + tok=self.invitee_tok, + expect_code=200, + ) + + # The invited user can be re-invited to the room.
+ self.helper.invite( + room=self.direct_rooms[0], + src=self.user_id, + targ=self.invitee_id, + tok=self.tok, + expect_code=200, + ) + + # If we're alone in the room and have always been the only member, we can invite + # someone. + self.helper.invite( + room=self.direct_rooms[1], + src=self.user_id, + targ=not_invited_user, + tok=self.tok, + expect_code=200, + ) + + # Disable the 3pid invite ratelimiter + burst = self.hs.config.rc_third_party_invite.burst_count + per_second = self.hs.config.rc_third_party_invite.per_second + self.hs.config.rc_third_party_invite.burst_count = 10 + self.hs.config.rc_third_party_invite.per_second = 0.1 + + # We can't send a 3PID invite to a room that already has two members. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.direct_rooms[0], + expected_code=403, + ) + + # We can't send a 3PID invite to a room that already has a pending invite. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.direct_rooms[1], + expected_code=403, + ) + + # We can send a 3PID invite to a room in which we've always been the only member. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.direct_rooms[2], + expected_code=200, + ) + + # We can't send a 3PID invite to a room in which there's already a 3PID invite. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.direct_rooms[2], + expected_code=403, + ) + + self.hs.config.rc_third_party_invite.burst_count = burst + self.hs.config.rc_third_party_invite.per_second = per_second + + # We can't publish the room to the public room list + url = "/_matrix/client/r0/directory/list/room/%s" % self.direct_rooms[0] + data = {"visibility": "public"} + + request, channel = self.make_request("PUT", url, data, access_token=self.tok) + self.render(request) + self.assertEqual(channel.code, 403, channel.result) + + def test_unrestricted(self): + """Tests that, in unrestricted mode, we can invite whoever we want, but we can + only change the power level of users that wouldn't be forbidden in restricted + mode. + + Tests that the room cannot be published to the public room list. + """ + # We can invite + self.helper.invite( + room=self.unrestricted_room, + src=self.user_id, + targ="@test:forbidden_domain", + tok=self.tok, + expect_code=200, + ) + + self.helper.invite( + room=self.unrestricted_room, + src=self.user_id, + targ="@test:not_forbidden_domain", + tok=self.tok, + expect_code=200, + ) + + # We can send a 3PID invite to an address that is mapped to a forbidden HS. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.unrestricted_room, + expected_code=200, + ) + + # We can send a 3PID invite to an address that is mapped to an HS that's not + # forbidden. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.unrestricted_room, + expected_code=200, + ) + + # We can send a power level event that doesn't redefine the default PL or set a + # non-default PL for a user that would be forbidden in restricted mode. + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.PowerLevels, + body={"users": {self.user_id: 100, "@test:not_forbidden_domain": 10}}, + tok=self.tok, + expect_code=200, + ) + + # We can't send a power level event that redefines the default PL and doesn't set + # a non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.PowerLevels, + body={ + "users": {self.user_id: 100, "@test:not_forbidden_domain": 10}, + "users_default": 10, + }, + tok=self.tok, + expect_code=403, + ) + + # We can't send a power level event that doesn't redefine the default PL but sets + # a non-default PL for a user that would be forbidden in restricted mode. + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.PowerLevels, + body={"users": {self.user_id: 100, "@test:forbidden_domain": 10}}, + tok=self.tok, + expect_code=403, + ) + + # We can't publish the room to the public room list + url = "/_matrix/client/r0/directory/list/room/%s" % self.unrestricted_room + data = {"visibility": "public"} + + request, channel = self.make_request("PUT", url, data, access_token=self.tok) + self.render(request) + self.assertEqual(channel.code, 403, channel.result) + + def test_change_rules(self): + """Tests that we can only change the current rule from restricted to + unrestricted. + """ + # We can't change the rule from restricted to direct. + self.change_rule_in_room( + room_id=self.restricted_room, new_rule=AccessRules.DIRECT, expected_code=403 + ) + + # We can change the rule from restricted to unrestricted. + # Note that this changes self.restricted_room to an unrestricted room + self.change_rule_in_room( + room_id=self.restricted_room, + new_rule=AccessRules.UNRESTRICTED, + expected_code=200, + ) + + # We can't change the rule from unrestricted to restricted. + self.change_rule_in_room( + room_id=self.unrestricted_room, + new_rule=AccessRules.RESTRICTED, + expected_code=403, + ) + + # We can't change the rule from unrestricted to direct. + self.change_rule_in_room( + room_id=self.unrestricted_room, + new_rule=AccessRules.DIRECT, + expected_code=403, + ) + + # We can't change the rule from direct to restricted. + self.change_rule_in_room( + room_id=self.direct_rooms[0], + new_rule=AccessRules.RESTRICTED, + expected_code=403, + ) + + # We can't change the rule from direct to unrestricted. + self.change_rule_in_room( + room_id=self.direct_rooms[0], + new_rule=AccessRules.UNRESTRICTED, + expected_code=403, + ) + + # We can't publish a room to the public room list and then change its rule to + # unrestricted + + # Create a restricted room + test_room_id = self.create_room(rule=AccessRules.RESTRICTED) + + # Publish the room to the public room list + url = "/_matrix/client/r0/directory/list/room/%s" % test_room_id + data = {"visibility": "public"} + + request, channel = self.make_request("PUT", url, data, access_token=self.tok) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + # Attempt to switch the room to "unrestricted" + self.change_rule_in_room( + room_id=test_room_id, new_rule=AccessRules.UNRESTRICTED, expected_code=403 + ) + + # Attempt to switch the room to "direct" + self.change_rule_in_room( + room_id=test_room_id, new_rule=AccessRules.DIRECT, expected_code=403 + ) + + def test_change_room_avatar(self): + """Tests that changing the room avatar is always allowed unless the room is a + direct chat, in which case it's forbidden.
+ """ + + avatar_content = { + "info": {"h": 398, "mimetype": "image/jpeg", "size": 31037, "w": 394}, + "url": "mxc://example.org/JWEIFJgwEIhweiWJE", + } + + self.helper.send_state( + room_id=self.restricted_room, + event_type=EventTypes.RoomAvatar, + body=avatar_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.RoomAvatar, + body=avatar_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.direct_rooms[0], + event_type=EventTypes.RoomAvatar, + body=avatar_content, + tok=self.tok, + expect_code=403, + ) + + def test_change_room_name(self): + """Tests that changing the room name is always allowed unless the room is a direct + chat, in which case it's forbidden. + """ + + name_content = {"name": "My super room"} + + self.helper.send_state( + room_id=self.restricted_room, + event_type=EventTypes.Name, + body=name_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.Name, + body=name_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.direct_rooms[0], + event_type=EventTypes.Name, + body=name_content, + tok=self.tok, + expect_code=403, + ) + + def test_change_room_topic(self): + """Tests that changing the room topic is always allowed unless the room is a + direct chat, in which case it's forbidden. + """ + + topic_content = {"topic": "Welcome to this room"} + + self.helper.send_state( + room_id=self.restricted_room, + event_type=EventTypes.Topic, + body=topic_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.Topic, + body=topic_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.direct_rooms[0], + event_type=EventTypes.Topic, + body=topic_content, + tok=self.tok, + expect_code=403, + ) + + def test_revoke_3pid_invite_direct(self): + """Tests that revoking a 3PID invite doesn't cause the room access rules module to + confuse the revokation as a new 3PID invite. + """ + invite_token = "sometoken" + + invite_body = { + "display_name": "ker...@exa...", + "public_keys": [ + { + "key_validity_url": "https://validity_url", + "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA", + }, + { + "key_validity_url": "https://validity_url", + "public_key": "4_9nzEeDwR5N9s51jPodBiLnqH43A2_g2InVT137t9I", + }, + ], + "key_validity_url": "https://validity_url", + "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA", + } + + self.send_state_with_state_key( + room_id=self.direct_rooms[1], + event_type=EventTypes.ThirdPartyInvite, + state_key=invite_token, + body=invite_body, + tok=self.tok, + ) + + self.send_state_with_state_key( + room_id=self.direct_rooms[1], + event_type=EventTypes.ThirdPartyInvite, + state_key=invite_token, + body={}, + tok=self.tok, + ) + + invite_token = "someothertoken" + + self.send_state_with_state_key( + room_id=self.direct_rooms[1], + event_type=EventTypes.ThirdPartyInvite, + state_key=invite_token, + body=invite_body, + tok=self.tok, + ) + + def test_check_event_allowed(self): + """Tests that RoomAccessRules.check_event_allowed behaves accordingly. + + It tests that: + * forbidden users cannot join restricted rooms. + * forbidden users can only join unrestricted rooms if they have an invite. 
+ """ + event_creator = self.hs.get_event_creation_handler() + + # Test that forbidden users cannot join restricted rooms + requester = create_requester(self.user_id) + allowed_requester = create_requester("@user:allowed_domain") + forbidden_requester = create_requester("@user:forbidden_domain") + + # Create a join event for a forbidden user + forbidden_join_event, forbidden_join_event_context = self.get_success( + event_creator.create_event( + forbidden_requester, + { + "type": EventTypes.Member, + "room_id": self.restricted_room, + "sender": forbidden_requester.user.to_string(), + "content": {"membership": Membership.JOIN}, + "state_key": forbidden_requester.user.to_string(), + }, + ) + ) + + # Create a join event for an allowed user + allowed_join_event, allowed_join_event_context = self.get_success( + event_creator.create_event( + allowed_requester, + { + "type": EventTypes.Member, + "room_id": self.restricted_room, + "sender": allowed_requester.user.to_string(), + "content": {"membership": Membership.JOIN}, + "state_key": allowed_requester.user.to_string(), + }, + ) + ) + + # Assert a join event from a forbidden user to a restricted room is rejected + can_join = self.get_success( + self.third_party_event_rules.check_event_allowed( + forbidden_join_event, forbidden_join_event_context + ) + ) + self.assertFalse(can_join) + + # But a join event from an non-forbidden user to a restricted room is allowed + can_join = self.get_success( + self.third_party_event_rules.check_event_allowed( + allowed_join_event, allowed_join_event_context + ) + ) + self.assertTrue(can_join) + + # Test that forbidden users can only join unrestricted rooms if they have an invite + + # Recreate the forbidden join event for the unrestricted room instead + forbidden_join_event, forbidden_join_event_context = self.get_success( + event_creator.create_event( + forbidden_requester, + { + "type": EventTypes.Member, + "room_id": self.unrestricted_room, + "sender": forbidden_requester.user.to_string(), + "content": {"membership": Membership.JOIN}, + "state_key": forbidden_requester.user.to_string(), + }, + ) + ) + + # A forbidden user without an invite should not be able to join an unrestricted room + can_join = self.get_success( + self.third_party_event_rules.check_event_allowed( + forbidden_join_event, forbidden_join_event_context + ) + ) + self.assertFalse(can_join) + + # However, if we then invite this user... + self.helper.invite( + room=self.unrestricted_room, + src=requester.user.to_string(), + targ=forbidden_requester.user.to_string(), + tok=self.tok, + ) + + # And create another join event, making sure that its context states it's coming + # in after the above invite was made... + forbidden_join_event, forbidden_join_event_context = self.get_success( + event_creator.create_event( + forbidden_requester, + { + "type": EventTypes.Member, + "room_id": self.unrestricted_room, + "sender": forbidden_requester.user.to_string(), + "content": {"membership": Membership.JOIN}, + "state_key": forbidden_requester.user.to_string(), + }, + ) + ) + + # Then the forbidden user should be able to join! + can_join = self.get_success( + self.third_party_event_rules.check_event_allowed( + forbidden_join_event, forbidden_join_event_context + ) + ) + self.assertTrue(can_join) + + def test_freezing_a_room(self): + """Tests that the power levels in a room change to prevent new events from + non-admin users when the last admin of a room leaves. 
+ """ + + def freeze_room_with_id_and_power_levels( + room_id: str, custom_power_levels_content: Optional[JsonDict] = None, + ): + # Invite a user to the room, they join with PL 0 + self.helper.invite( + room=room_id, src=self.user_id, targ=self.invitee_id, tok=self.tok, + ) + + # Invitee joins the room + self.helper.join( + room=room_id, user=self.invitee_id, tok=self.invitee_tok, + ) + + if not custom_power_levels_content: + # Retrieve the room's current power levels event content + power_levels = self.helper.get_state( + room_id=room_id, event_type="m.room.power_levels", tok=self.tok, + ) + else: + power_levels = custom_power_levels_content + + # Override the room's power levels with the given power levels content + self.helper.send_state( + room_id=room_id, + event_type="m.room.power_levels", + body=custom_power_levels_content, + tok=self.tok, + ) + + # Ensure that the invitee leaving the room does not change the power levels + self.helper.leave( + room=room_id, user=self.invitee_id, tok=self.invitee_tok, + ) + + # Retrieve the new power levels of the room + new_power_levels = self.helper.get_state( + room_id=room_id, event_type="m.room.power_levels", tok=self.tok, + ) + + # Ensure they have not changed + self.assertDictEqual(power_levels, new_power_levels) + + # Invite the user back again + self.helper.invite( + room=room_id, src=self.user_id, targ=self.invitee_id, tok=self.tok, + ) + + # Invitee joins the room + self.helper.join( + room=room_id, user=self.invitee_id, tok=self.invitee_tok, + ) + + # Now the admin leaves the room + self.helper.leave( + room=room_id, user=self.user_id, tok=self.tok, + ) + + # Check the power levels again + new_power_levels = self.helper.get_state( + room_id=room_id, event_type="m.room.power_levels", tok=self.invitee_tok, + ) + + # Ensure that the new power levels prevent anyone but admins from sending + # certain events + self.assertEquals(new_power_levels["state_default"], 100) + self.assertEquals(new_power_levels["events_default"], 100) + self.assertEquals(new_power_levels["kick"], 100) + self.assertEquals(new_power_levels["invite"], 100) + self.assertEquals(new_power_levels["ban"], 100) + self.assertEquals(new_power_levels["redact"], 100) + self.assertDictEqual(new_power_levels["events"], {}) + self.assertDictEqual(new_power_levels["users"], {self.user_id: 100}) + + # Ensure new users entering the room aren't going to immediately become admins + self.assertEquals(new_power_levels["users_default"], 0) + + # Test that freezing a room with the default power level state event content works + room1 = self.create_room() + freeze_room_with_id_and_power_levels(room1) + + # Test that freezing a room with a power level state event that is missing + # `state_default` and `event_default` keys behaves as expected + room2 = self.create_room() + freeze_room_with_id_and_power_levels( + room2, + { + "ban": 50, + "events": { + "m.room.avatar": 50, + "m.room.canonical_alias": 50, + "m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100, + }, + "invite": 0, + "kick": 50, + "redact": 50, + "users": {self.user_id: 100}, + "users_default": 0, + # Explicitly remove `state_default` and `event_default` keys + }, + ) + + # Test that freezing a room with a power level state event that is *additionally* + # missing `ban`, `invite`, `kick` and `redact` keys behaves as expected + room3 = self.create_room() + freeze_room_with_id_and_power_levels( + room3, + { + "events": { + "m.room.avatar": 50, + "m.room.canonical_alias": 50, + 
"m.room.history_visibility": 100, + "m.room.name": 50, + "m.room.power_levels": 100, + }, + "users": {self.user_id: 100}, + "users_default": 0, + # Explicitly remove `state_default` and `event_default` keys + # Explicitly remove `ban`, `invite`, `kick` and `redact` keys + }, + ) + + def create_room( + self, + direct=False, + rule=None, + preset=RoomCreationPreset.TRUSTED_PRIVATE_CHAT, + initial_state=None, + expected_code=200, + ): + content = {"is_direct": direct, "preset": preset} + + if rule: + content["initial_state"] = [ + {"type": ACCESS_RULES_TYPE, "state_key": "", "content": {"rule": rule}} + ] + + if initial_state: + if "initial_state" not in content: + content["initial_state"] = [] + + content["initial_state"] += initial_state + + request, channel = self.make_request( + "POST", "/_matrix/client/r0/createRoom", content, access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + if expected_code == 200: + return channel.json_body["room_id"] + + def current_rule_in_room(self, room_id): + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, 200, channel.result) + return channel.json_body["rule"] + + def change_rule_in_room(self, room_id, new_rule, expected_code=200): + data = {"rule": new_rule} + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE), + json.dumps(data), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + def change_join_rule_in_room(self, room_id, new_join_rule, expected_code=200): + data = {"join_rule": new_join_rule} + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, EventTypes.JoinRules), + json.dumps(data), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + def send_threepid_invite(self, address, room_id, expected_code=200): + params = {"id_server": "testis", "medium": "email", "address": address} + + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/%s/invite" % room_id, + json.dumps(params), + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def send_state_with_state_key( + self, room_id, event_type, state_key, body, tok, expect_code=200 + ): + path = "/_matrix/client/r0/rooms/%s/state/%s/%s" % ( + room_id, + event_type, + state_key, + ) + + request, channel = self.make_request( + "PUT", path, json.dumps(body), access_token=tok + ) + self.render(request) + + self.assertEqual(channel.code, expect_code, channel.result) + + return channel.json_body diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py
index 8c24add530..715e87de08 100644 --- a/tests/rest/client/third_party_rules.py +++ b/tests/rest/client/third_party_rules.py
@@ -12,18 +12,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import Requester from tests import unittest class ThirdPartyRulesTestModule: - def __init__(self, config): + def __init__(self, config, *args, **kwargs): pass - def check_event_allowed(self, event, context): + async def on_create_room( + self, requester: Requester, config: dict, is_requester_admin: bool + ): + return True + + async def check_event_allowed(self, event, context): if event.type == "foo.bar.forbidden": return False else: @@ -51,29 +56,31 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.hs = self.setup_test_homeserver(config=config) return self.hs + def prepare(self, reactor, clock, homeserver): + # Create a user and room to play with during the tests + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + def test_third_party_rules(self): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - - room_id = self.helper.create_room_as(user_id, tok=tok) - request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) diff --git a/tests/rest/client/v1/test_push_rule_attrs.py b/tests/rest/client/v1/test_push_rule_attrs.py new file mode 100644
index 0000000000..081052f6a6 --- /dev/null +++ b/tests/rest/client/v1/test_push_rule_attrs.py
@@ -0,0 +1,448 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import synapse +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login, push_rule, room + +from tests.unittest import HomeserverTestCase + + +class PushRuleAttributesTestCase(HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + push_rule.register_servlets, + ] + hijack_auth = False + + def test_enabled_on_creation(self): + """ + Tests the GET and PUT of push rules' `enabled` endpoints. + Tests that a rule is enabled upon creation, even though a rule with that + ruleId existed previously and was disabled. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET enabled for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], True) + + def test_enabled_on_recreation(self): + """ + Tests the GET and PUT of push rules' `enabled` endpoints. + Tests that a rule is enabled upon creation, even if a rule with that + ruleId existed previously and was disabled. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # disable the rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": False}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check rule disabled + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], False) + + # DELETE the rule + request, channel = self.make_request( + "DELETE", "/pushrules/global/override/best.friend", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET enabled for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], True) + + def test_enabled_disable(self): + """ + Tests the GET and PUT of push rules' `enabled` endpoints. + Tests that a rule is disabled and enabled when we ask for it. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # disable the rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": False}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check rule disabled + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], False) + + # re-enable the rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": True}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check rule enabled + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["enabled"], True) + + def test_enabled_404_when_get_non_existent(self): + """ + Tests that `enabled` gives 404 when the rule doesn't exist. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET enabled for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # DELETE the rule + request, channel = self.make_request( + "DELETE", "/pushrules/global/override/best.friend", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check 404 for deleted rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_enabled_404_when_get_non_existent_server_rule(self): + """ + Tests that `enabled` gives 404 when the server-default rule doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/.m.muahahaha/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_enabled_404_when_put_non_existent_rule(self): + """ + Tests that `enabled` gives 404 when we put to a rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/enabled", + {"enabled": True}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_enabled_404_when_put_non_existent_server_rule(self): + """ + Tests that `enabled` gives 404 when we put to a server-default rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/.m.muahahah/enabled", + {"enabled": True}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_get(self): + """ + Tests that `actions` gives you what you expect on a fresh rule. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET actions for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/actions", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual( + channel.json_body["actions"], ["notify", {"set_tweak": "highlight"}] + ) + + def test_actions_put(self): + """ + Tests that PUT on actions updates the value you'd get from GET. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # change the rule actions + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/actions", + {"actions": ["dont_notify"]}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # GET actions for that new rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/actions", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["actions"], ["dont_notify"]) + + def test_actions_404_when_get_non_existent(self): + """ + Tests that `actions` gives 404 when the rule doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + body = { + "conditions": [ + {"kind": "event_match", "key": "sender", "pattern": "@user2:hs"} + ], + "actions": ["notify", {"set_tweak": "highlight"}], + } + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + # PUT a new rule + request, channel = self.make_request( + "PUT", "/pushrules/global/override/best.friend", body, access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # DELETE the rule + request, channel = self.make_request( + "DELETE", "/pushrules/global/override/best.friend", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 200) + + # check 404 for deleted rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/best.friend/enabled", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_404_when_get_non_existent_server_rule(self): + """ + Tests that `actions` gives 404 when the server-default rule doesn't exist. 
+ """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # check 404 for never-heard-of rule + request, channel = self.make_request( + "GET", "/pushrules/global/override/.m.muahahaha/actions", access_token=token + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_404_when_put_non_existent_rule(self): + """ + Tests that `actions` gives 404 when putting to a rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend/actions", + {"actions": ["dont_notify"]}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) + + def test_actions_404_when_put_non_existent_server_rule(self): + """ + Tests that `actions` gives 404 when putting to a server-default rule that doesn't exist. + """ + self.register_user("user", "pass") + token = self.login("user", "pass") + + # enable & check 404 for never-heard-of rule + request, channel = self.make_request( + "PUT", + "/pushrules/global/override/.m.muahahah/actions", + {"actions": ["dont_notify"]}, + access_token=token, + ) + self.render(request) + self.assertEqual(channel.code, 404) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index 152a5182fa..93f899d861 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py
@@ -14,11 +14,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import json import os import re from email.parser import Parser +from typing import Optional +from urllib.parse import urlencode  import pkg_resources  @@ -27,8 +28,10 @@ from synapse.api.constants import LoginType, Membership from synapse.api.errors import Codes from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import account, register +from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource  from tests import unittest +from tests.unittest import override_config   class PasswordResetTestCase(unittest.HomeserverTestCase): @@ -69,6 +72,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):  def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() + self.submit_token_resource = PasswordResetSubmitTokenResource(hs)  def test_basic_password_reset(self): """Test basic password reset flow @@ -250,8 +254,32 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): # Remove the host path = link.replace("https://example.com", "")  + # Load the password reset confirmation page request, channel = self.make_request("GET", path, shorthand=False) - self.render(request) + request.render(self.submit_token_resource) + self.pump() + self.assertEquals(200, channel.code, channel.result) + + # Now POST to the same endpoint, mimicking the same behaviour as clicking the + # password reset confirm button + + # Send arguments as url-encoded form data, matching the template's behaviour + form_args = [] + for key, value_list in request.args.items(): + for value in value_list: + arg = (key, value) + form_args.append(arg) + + # Confirm the password reset + request, channel = self.make_request( + "POST", + path, + content=urlencode(form_args).encode("utf8"), + shorthand=False, + content_is_form=True, + ) + request.render(self.submit_token_resource) + self.pump() self.assertEquals(200, channel.code, channel.result)  def _get_link_from_email(self): @@ -668,16 +696,104 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"])  - def _request_token(self, email, client_secret): + @override_config({"next_link_domain_whitelist": None}) + def test_next_link(self): + """Tests a valid next_link parameter value with no whitelist (good case)""" + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.com/a/good/site", + expect_code=200, + ) + + @override_config({"next_link_domain_whitelist": None}) + def test_next_link_exotic_protocol(self): + """Tests using an esoteric protocol as a next_link parameter value. + Someone may be hosting a client on IPFS etc. 
+ """ + self._request_token( + "something@example.com", + "some_secret", + next_link="some-protocol://abcdefghijklmopqrstuvwxyz", + expect_code=200, + ) + + @override_config({"next_link_domain_whitelist": None}) + def test_next_link_file_uri(self): + """Tests next_link parameters cannot be file URI""" + # Attempt to use a next_link value that points to the local disk + self._request_token( + "something@example.com", + "some_secret", + next_link="file:///host/path", + expect_code=400, + ) + + @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) + def test_next_link_domain_whitelist(self): + """Tests next_link parameters must fit the whitelist if provided""" + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.com/some/good/page", + expect_code=200, + ) + + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.org/some/also/good/page", + expect_code=200, + ) + + self._request_token( + "something@example.com", + "some_secret", + next_link="https://bad.example.org/some/bad/page", + expect_code=400, + ) + + @override_config({"next_link_domain_whitelist": []}) + def test_empty_next_link_domain_whitelist(self): + """Tests an empty next_lint_domain_whitelist value, meaning next_link is essentially + disallowed + """ + self._request_token( + "something@example.com", + "some_secret", + next_link="https://example.com/a/page", + expect_code=400, + ) + + def _request_token( + self, + email: str, + client_secret: str, + next_link: Optional[str] = None, + expect_code: int = 200, + ) -> str: + """Request a validation token to add an email address to a user's account + + Args: + email: The email address to validate + client_secret: A secret string + next_link: A link to redirect the user to after validation + expect_code: Expected return code of the call + + Returns: + The ID of the new threepid validation session + """ + body = {"client_secret": client_secret, "email": email, "send_attempt": 1} + if next_link: + body["next_link"] = next_link + request, channel = self.make_request( - "POST", - b"account/3pid/email/requestToken", - {"client_secret": client_secret, "email": email, "send_attempt": 1}, + "POST", b"account/3pid/email/requestToken", body, ) self.render(request) - self.assertEquals(200, channel.code, channel.result) + self.assertEquals(expect_code, channel.code, channel.result) - return channel.json_body["sid"] + return channel.json_body.get("sid") def _request_token_invalid_email( self, email, expected_errcode, expected_error, client_secret="foobar", diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index 2fc3a60fc5..ecf697e5e0 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py
@@ -19,8 +19,12 @@ import datetime import json import os +from mock import Mock + import pkg_resources +from twisted.internet import defer + import synapse.rest.admin from synapse.api.constants import LoginType from synapse.api.errors import Codes @@ -87,14 +91,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.result["code"], b"400", channel.result) self.assertEquals(channel.json_body["error"], "Invalid password") - def test_POST_bad_username(self): - request_data = json.dumps({"username": 777, "password": "monkey"}) - request, channel = self.make_request(b"POST", self.url, request_data) - self.render(request) - - self.assertEquals(channel.result["code"], b"400", channel.result) - self.assertEquals(channel.json_body["error"], "Invalid username") - def test_POST_user_valid(self): user_id = "@kermit:test" device_id = "frogfone" @@ -303,6 +299,47 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.assertIsNotNone(channel.json_body.get("sid")) +class RegisterHideProfileTestCase(unittest.HomeserverTestCase): + + servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] + + def make_homeserver(self, reactor, clock): + + self.url = b"/_matrix/client/r0/register" + + config = self.default_config() + config["enable_registration"] = True + config["show_users_in_user_directory"] = False + config["replicate_user_profiles_to"] = ["fakeserver"] + + mock_http_client = Mock(spec=["get_json", "post_json_get_json"]) + mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}")) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + return self.hs + + def test_profile_hidden(self): + user_id = self.register_user("kermit", "monkey") + + post_json = self.hs.get_simple_http_client().post_json_get_json + + # We expect post_json_get_json to have been called twice: once with the original + # profile and once with the None profile resulting from the request to hide it + # from the user directory. + self.assertEqual(post_json.call_count, 2, post_json.call_args_list) + + # Get the args (and not kwargs) passed to post_json. + args = post_json.call_args[0] + # Make sure the last call was attempting to replicate profiles. + split_uri = args[0].split("/") + self.assertEqual(split_uri[len(split_uri) - 1], "replicate_profiles", args[0]) + # Make sure the last profile update was overriding the user's profile to None. 
+ self.assertEqual(args[1]["batch"][user_id], None, args[1]) + + class AccountValidityTestCase(unittest.HomeserverTestCase): servlets = [ @@ -312,6 +349,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): sync.register_servlets, logout.register_servlets, account_validity.register_servlets, + account.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -437,6 +475,155 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.result["code"], b"200", channel.result) +class AccountValidityUserDirectoryTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.client.v1.profile.register_servlets, + synapse.rest.client.v1.room.register_servlets, + synapse.rest.client.v2_alpha.user_directory.register_servlets, + login.register_servlets, + register.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + account_validity.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + # Set accounts to expire after a week + config["enable_registration"] = True + config["account_validity"] = { + "enabled": True, + "period": 604800000, # Time in ms for 1 week + } + config["replicate_user_profiles_to"] = "test.is" + + # Mock homeserver requests to an identity server + mock_http_client = Mock(spec=["post_json_get_json"]) + mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}")) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + return self.hs + + def test_expired_user_in_directory(self): + """Test that an expired user is hidden in the user directory""" + # Create an admin user to search the user directory + admin_id = self.register_user("admin", "adminpassword", admin=True) + admin_tok = self.login("admin", "adminpassword") + + # Ensure the admin never expires + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": admin_id, + "expiration_ts": 999999999999, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Mock the homeserver's HTTP client + post_json = self.hs.get_simple_http_client().post_json_get_json + + # Create a user + username = "kermit" + user_id = self.register_user(username, "monkey") + self.login(username, "monkey") + self.get_success( + self.hs.get_datastore().set_profile_displayname(username, "mr.kermit", 1) + ) + + # Check that a full profile for this user is replicated + self.assertIsNotNone(post_json.call_args, post_json.call_args) + payload = post_json.call_args[0][1] + batch = payload.get("batch") + + self.assertIsNotNone(batch, batch) + self.assertEquals(len(batch), 1, batch) + + replicated_user_id = list(batch.keys())[0] + self.assertEquals(replicated_user_id, user_id, replicated_user_id) + + # There was replicated information about our user + # Check that it's not None + replicated_content = batch[user_id] + self.assertIsNotNone(replicated_content) + + # Expire the user + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": user_id, + "expiration_ts": 0, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok + ) + self.render(request) + 
self.assertEquals(channel.result["code"], b"200", channel.result) + + # Wait for the background job to run which hides expired users in the directory + self.reactor.advance(60 * 60 * 1000) + + # Check if the homeserver has replicated the user's profile to the identity server + self.assertIsNotNone(post_json.call_args, post_json.call_args) + payload = post_json.call_args[0][1] + batch = payload.get("batch") + + self.assertIsNotNone(batch, batch) + self.assertEquals(len(batch), 1, batch) + + replicated_user_id = list(batch.keys())[0] + self.assertEquals(replicated_user_id, user_id, replicated_user_id) + + # There was replicated information about our user + # Check that it's None, signifying that the user should be removed from the user + # directory because they were expired + replicated_content = batch[user_id] + self.assertIsNone(replicated_content) + + # Now renew the user, and check they get replicated again to the identity server + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": user_id, + "expiration_ts": 99999999999, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.pump(10) + self.reactor.advance(10) + self.pump() + + # Check if the homeserver has replicated the user's profile to the identity server + post_json = self.hs.get_simple_http_client().post_json_get_json + self.assertNotEquals(post_json.call_args, None, post_json.call_args) + payload = post_json.call_args[0][1] + batch = payload.get("batch") + self.assertNotEquals(batch, None, batch) + self.assertEquals(len(batch), 1, batch) + replicated_user_id = list(batch.keys())[0] + self.assertEquals(replicated_user_id, user_id, replicated_user_id) + + # There was replicated information about our user + # Check that it's not None, signifying that the user is back in the user + # directory + replicated_content = batch[user_id] + self.assertIsNotNone(replicated_content) + + class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): servlets = [ @@ -587,7 +774,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "POST", "account/deactivate", request_data, access_token=tok ) self.render(request) - self.assertEqual(request.code, 200) + self.assertEqual(request.code, 200, channel.result) self.reactor.advance(datetime.timedelta(days=8).total_seconds()) diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index f4f3e56777..5f897d49cf 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py
@@ -120,12 +120,13 @@ class _TestImage: extension = attr.ib(type=bytes) expected_cropped = attr.ib(type=Optional[bytes]) expected_scaled = attr.ib(type=Optional[bytes]) + expected_found = attr.ib(default=True, type=bool) @parameterized_class( ("test_image",), [ - # smol png + # smoll png ( _TestImage( unhexlify( @@ -161,6 +162,8 @@ class _TestImage: None, ), ), + # an empty file + (_TestImage(b"", b"image/gif", b".gif", None, None, False,),), ], ) class MediaRepoTests(unittest.HomeserverTestCase): @@ -303,12 +306,16 @@ class MediaRepoTests(unittest.HomeserverTestCase): self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None) def test_thumbnail_crop(self): - self._test_thumbnail("crop", self.test_image.expected_cropped) + self._test_thumbnail( + "crop", self.test_image.expected_cropped, self.test_image.expected_found + ) def test_thumbnail_scale(self): - self._test_thumbnail("scale", self.test_image.expected_scaled) + self._test_thumbnail( + "scale", self.test_image.expected_scaled, self.test_image.expected_found + ) - def _test_thumbnail(self, method, expected_body): + def _test_thumbnail(self, method, expected_body, expected_found): params = "?width=32&height=32&method=" + method request, channel = self.make_request( "GET", self.media_id + params, shorthand=False @@ -325,11 +332,23 @@ class MediaRepoTests(unittest.HomeserverTestCase): ) self.pump() - self.assertEqual(channel.code, 200) - if expected_body is not None: + if expected_found: + self.assertEqual(channel.code, 200) + if expected_body is not None: + self.assertEqual( + channel.result["body"], expected_body, channel.result["body"] + ) + else: + # ensure that the result is at least some valid image + Image.open(BytesIO(channel.result["body"])) + else: + # A 404 with a JSON body. + self.assertEqual(channel.code, 404) self.assertEqual( - channel.result["body"], expected_body, channel.result["body"] + channel.json_body, + { + "errcode": "M_NOT_FOUND", + "error": "Not found [b'example.com', b'12345?width=32&height=32&method=%s']" + % method, + }, ) - else: - # ensure that the result is at least some valid image - Image.open(BytesIO(channel.result["body"])) diff --git a/tests/rulecheck/__init__.py b/tests/rulecheck/__init__.py new file mode 100644
index 0000000000..a354d38ca8 --- /dev/null +++ b/tests/rulecheck/__init__.py
@@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/rulecheck/test_domainrulecheck.py b/tests/rulecheck/test_domainrulecheck.py new file mode 100644
index 0000000000..1accc70dc9 --- /dev/null +++ b/tests/rulecheck/test_domainrulecheck.py
@@ -0,0 +1,334 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json + +import synapse.rest.admin +from synapse.config._base import ConfigError +from synapse.rest.client.v1 import login, room +from synapse.rulecheck.domain_rule_checker import DomainRuleChecker + +from tests import unittest +from tests.server import make_request, render + + +class DomainRuleCheckerTestCase(unittest.TestCase): + def test_allowed(self): + config = { + "default": False, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + "domains_prevented_from_being_invited_to_published_rooms": ["target_two"], + } + check = DomainRuleChecker(config) + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_one", None, "room", False + ) + ) + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_two", None, "room", False + ) + ) + self.assertTrue( + check.user_may_invite( + "test:source_two", "test:target_two", None, "room", False + ) + ) + + # User can invite internal user to a published room + self.assertTrue( + check.user_may_invite( + "test:source_one", "test1:target_one", None, "room", False, True + ) + ) + + # User can invite external user to a non-published room + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_two", None, "room", False, False + ) + ) + + def test_disallowed(self): + config = { + "default": True, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + "source_four": [], + }, + } + check = DomainRuleChecker(config) + self.assertFalse( + check.user_may_invite( + "test:source_one", "test:target_three", None, "room", False + ) + ) + self.assertFalse( + check.user_may_invite( + "test:source_two", "test:target_three", None, "room", False + ) + ) + self.assertFalse( + check.user_may_invite( + "test:source_two", "test:target_one", None, "room", False + ) + ) + self.assertFalse( + check.user_may_invite( + "test:source_four", "test:target_one", None, "room", False + ) + ) + + # User cannot invite external user to a published room + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_two", None, "room", False, True + ) + ) + + def test_default_allow(self): + config = { + "default": True, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + } + check = DomainRuleChecker(config) + self.assertTrue( + check.user_may_invite( + "test:source_three", "test:target_one", None, "room", False + ) + ) + + def test_default_deny(self): + config = { + "default": False, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + } + check = DomainRuleChecker(config) + self.assertFalse( + check.user_may_invite( + "test:source_three", "test:target_one", None, "room", False + ) + ) + + def test_config_parse(self): + config = { + "default": False, + "domain_mapping": { + 
"source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + } + self.assertEquals(config, DomainRuleChecker.parse_config(config)) + + def test_config_parse_failure(self): + config = { + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + } + } + self.assertRaises(ConfigError, DomainRuleChecker.parse_config, config) + + +class DomainRuleCheckerRoomTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] + + hijack_auth = False + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["trusted_third_party_id_servers"] = ["localhost"] + + config["spam_checker"] = { + "module": "synapse.rulecheck.domain_rule_checker.DomainRuleChecker", + "config": { + "default": True, + "domain_mapping": {}, + "can_only_join_rooms_with_invite": True, + "can_only_create_one_to_one_rooms": True, + "can_only_invite_during_room_creation": True, + "can_invite_by_third_party_id": False, + }, + } + + hs = self.setup_test_homeserver(config=config) + return hs + + def prepare(self, reactor, clock, hs): + self.admin_user_id = self.register_user("admin_user", "pass", admin=True) + self.admin_access_token = self.login("admin_user", "pass") + + self.normal_user_id = self.register_user("normal_user", "pass", admin=False) + self.normal_access_token = self.login("normal_user", "pass") + + self.other_user_id = self.register_user("other_user", "pass", admin=False) + + def test_admin_can_create_room(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + def test_normal_user_cannot_create_empty_room(self): + channel = self._create_room(self.normal_access_token) + assert channel.result["code"] == b"403", channel.result + + def test_normal_user_cannot_create_room_with_multiple_invites(self): + channel = self._create_room( + self.normal_access_token, + content={"invite": [self.other_user_id, self.admin_user_id]}, + ) + assert channel.result["code"] == b"403", channel.result + + # Test that it correctly counts both normal and third party invites + channel = self._create_room( + self.normal_access_token, + content={ + "invite": [self.other_user_id], + "invite_3pid": [{"medium": "email", "address": "foo@example.com"}], + }, + ) + assert channel.result["code"] == b"403", channel.result + + # Test that it correctly rejects third party invites + channel = self._create_room( + self.normal_access_token, + content={ + "invite": [], + "invite_3pid": [{"medium": "email", "address": "foo@example.com"}], + }, + ) + assert channel.result["code"] == b"403", channel.result + + def test_normal_user_can_room_with_single_invites(self): + channel = self._create_room( + self.normal_access_token, content={"invite": [self.other_user_id]} + ) + assert channel.result["code"] == b"200", channel.result + + def test_cannot_join_public_room(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=403 + ) + + def test_can_join_invited_room(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.invite( + room_id, + src=self.admin_user_id, + 
targ=self.normal_user_id, + tok=self.admin_access_token, + ) + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200 + ) + + def test_cannot_invite(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.invite( + room_id, + src=self.admin_user_id, + targ=self.normal_user_id, + tok=self.admin_access_token, + ) + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200 + ) + + self.helper.invite( + room_id, + src=self.normal_user_id, + targ=self.other_user_id, + tok=self.normal_access_token, + expect_code=403, + ) + + def test_cannot_3pid_invite(self): + """Test that unbound 3pid invites get rejected. + """ + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.invite( + room_id, + src=self.admin_user_id, + targ=self.normal_user_id, + tok=self.admin_access_token, + ) + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200 + ) + + self.helper.invite( + room_id, + src=self.normal_user_id, + targ=self.other_user_id, + tok=self.normal_access_token, + expect_code=403, + ) + + request, channel = self.make_request( + "POST", + "rooms/%s/invite" % (room_id), + {"address": "foo@bar.com", "medium": "email", "id_server": "localhost"}, + access_token=self.normal_access_token, + ) + self.render(request) + self.assertEqual(channel.code, 403, channel.result["body"]) + + def _create_room(self, token, content={}): + path = "/_matrix/client/r0/createRoom?access_token=%s" % (token,) + + request, channel = make_request( + self.hs.get_reactor(), + "POST", + path, + content=json.dumps(content).encode("utf8"), + ) + render(request, self.resource, self.hs.get_reactor()) + + return channel diff --git a/tests/server.py b/tests/server.py
index 48e45c6c8b..61ec670155 100644 --- a/tests/server.py +++ b/tests/server.py
@@ -1,6 +1,6 @@ import json import logging -from io import BytesIO +from io import SEEK_END, BytesIO import attr from zope.interface import implementer @@ -135,6 +135,7 @@ def make_request( request=SynapseRequest, shorthand=True, federation_auth_origin=None, + content_is_form=False, ): """ Make a web request using the given method and path, feed it the @@ -150,6 +151,8 @@ def make_request( with the usual REST API path, if it doesn't contain it. federation_auth_origin (bytes|None): if set to not-None, we will add a fake Authorization header pretenting to be the given server name. + content_is_form: Whether the content is URL encoded form data. Adds the + 'Content-Type': 'application/x-www-form-urlencoded' header. Returns: Tuple[synapse.http.site.SynapseRequest, channel] @@ -181,6 +184,8 @@ def make_request( req = request(channel) req.process = lambda: b"" req.content = BytesIO(content) + # Twisted expects to be at the end of the content when parsing the request. + req.content.seek(SEEK_END) req.postpath = list(map(unquote, path[1:].split(b"/"))) if access_token: @@ -195,7 +200,13 @@ def make_request( ) if content: - req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") + if content_is_form: + req.requestHeaders.addRawHeader( + b"Content-Type", b"application/x-www-form-urlencoded" + ) + else: + # Assume the body is JSON + req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") req.requestReceived(method, path, b"1.1") diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 973338ea71..6382b19dc3 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -67,7 +67,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): raise Exception("Failed to find reference to ResourceLimitsServerNotices") self._rlsn._store.user_last_seen_monthly_active = Mock( - side_effect=lambda user_id: make_awaitable(1000) + return_value=make_awaitable(1000) ) self._rlsn._server_notices_manager.send_notice = Mock( return_value=defer.succeed(Mock()) @@ -80,9 +80,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): return_value=defer.succeed("!something:localhost") ) self._rlsn._store.add_tag_to_room = Mock(return_value=defer.succeed(None)) - self._rlsn._store.get_tags_for_room = Mock( - side_effect=lambda user_id, room_id: make_awaitable({}) - ) + self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) @override_config({"hs_disabled": True}) def test_maybe_send_server_notice_disabled_hs(self): @@ -158,7 +156,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ self._rlsn._auth.check_auth_blocking = Mock(return_value=defer.succeed(None)) self._rlsn._store.user_last_seen_monthly_active = Mock( - side_effect=lambda user_id: make_awaitable(None) + return_value=make_awaitable(None) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -261,12 +259,10 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): self.user_id = "@user_id:test" def test_server_notice_only_sent_once(self): - self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(1000) - ) + self.store.get_monthly_active_count = Mock(return_value=make_awaitable(1000)) self.store.user_last_seen_monthly_active = Mock( - side_effect=lambda user_id: make_awaitable(1000) + return_value=make_awaitable(1000) ) # Call the function multiple times to ensure we only send the notice once diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index 370c247e16..755c70db31 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py
@@ -154,7 +154,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): user_id = "@user:server" self.store.get_monthly_active_count = Mock( - side_effect=lambda: make_awaitable(lots_of_users) + return_value=make_awaitable(lots_of_users) ) self.get_success( self.store.insert_client_ip( diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index f0a8e32f1e..20636fc400 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py
@@ -122,6 +122,56 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_positions(), {"master": 8}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 8) + def test_out_of_order_finish(self): + """Test that IDs persisted out of order are correctly handled + """ + + # Prefill table with 7 rows written by 'master' + self._insert_rows("master", 7) + + id_gen = self._create_id_generator() + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + ctx1 = self.get_success(id_gen.get_next()) + ctx2 = self.get_success(id_gen.get_next()) + ctx3 = self.get_success(id_gen.get_next()) + ctx4 = self.get_success(id_gen.get_next()) + + s1 = ctx1.__enter__() + s2 = ctx2.__enter__() + s3 = ctx3.__enter__() + s4 = ctx4.__enter__() + + self.assertEqual(s1, 8) + self.assertEqual(s2, 9) + self.assertEqual(s3, 10) + self.assertEqual(s4, 11) + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + ctx2.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + ctx1.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 9}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) + + ctx4.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 9}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) + + ctx3.__exit__(None, None, None) + + self.assertEqual(id_gen.get_positions(), {"master": 11}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 11) + def test_multi_instance(self): """Test that reads and writes from multiple processes are handled correctly. diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py
index 7e7f1286d9..fe37d2ed5a 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py
@@ -39,7 +39,7 @@ class DataStoreTestCase(unittest.TestCase): ) yield defer.ensureDeferred(self.store.create_profile(self.user.localpart)) yield defer.ensureDeferred( - self.store.set_profile_displayname(self.user.localpart, self.displayname) + self.store.set_profile_displayname(self.user.localpart, self.displayname, 1) ) users, total = yield defer.ensureDeferred( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 9870c74883..643072bbaf 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py
@@ -231,9 +231,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): ) self.get_success(d) - self.store.upsert_monthly_active_user = Mock( - side_effect=lambda user_id: make_awaitable(None) - ) + self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) d = self.store.populate_monthly_active_users(user_id) self.get_success(d) @@ -241,9 +239,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.upsert_monthly_active_user.assert_not_called() def test_populate_monthly_users_should_update(self): - self.store.upsert_monthly_active_user = Mock( - side_effect=lambda user_id: make_awaitable(None) - ) + self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) self.store.is_trial_user = Mock(return_value=defer.succeed(False)) @@ -256,9 +252,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.upsert_monthly_active_user.assert_called_once() def test_populate_monthly_users_should_not_update(self): - self.store.upsert_monthly_active_user = Mock( - side_effect=lambda user_id: make_awaitable(None) - ) + self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) self.store.is_trial_user = Mock(return_value=defer.succeed(False)) self.store.user_last_seen_monthly_active = Mock( @@ -344,9 +338,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): @override_config({"limit_usage_by_mau": False, "mau_stats_only": False}) def test_no_users_when_not_tracking(self): - self.store.upsert_monthly_active_user = Mock( - side_effect=lambda user_id: make_awaitable(None) - ) + self.store.upsert_monthly_active_user = Mock(return_value=make_awaitable(None)) self.get_success(self.store.populate_monthly_active_users("@user:sever")) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index 3fd0a38cf5..7a38022e71 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py
@@ -36,7 +36,7 @@ class ProfileStoreTestCase(unittest.TestCase): yield defer.ensureDeferred(self.store.create_profile(self.u_frank.localpart)) yield defer.ensureDeferred( - self.store.set_profile_displayname(self.u_frank.localpart, "Frank") + self.store.set_profile_displayname(self.u_frank.localpart, "Frank", 1) ) self.assertEquals( @@ -54,7 +54,7 @@ class ProfileStoreTestCase(unittest.TestCase): yield defer.ensureDeferred( self.store.set_profile_avatar_url( - self.u_frank.localpart, "http://my.site/here" + self.u_frank.localpart, "http://my.site/here", 1 ) ) diff --git a/tests/test_types.py b/tests/test_types.py
index 480bea1bdc..d4a722a30f 100644 --- a/tests/test_types.py +++ b/tests/test_types.py
@@ -12,9 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from six import string_types from synapse.api.errors import SynapseError -from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart +from synapse.types import ( + GroupID, + RoomAlias, + UserID, + map_username_to_mxid_localpart, + strip_invalid_mxid_characters, +) from tests import unittest @@ -103,3 +110,16 @@ class MapUsernameTestCase(unittest.TestCase): self.assertEqual( map_username_to_mxid_localpart("têst".encode("utf-8")), "t=c3=aast" ) + + +class StripInvalidMxidCharactersTestCase(unittest.TestCase): + def test_return_type(self): + unstripped = strip_invalid_mxid_characters("test") + stripped = strip_invalid_mxid_characters("test@") + + self.assertTrue(isinstance(unstripped, string_types), type(unstripped)) + self.assertTrue(isinstance(stripped, string_types), type(stripped)) + + def test_strip(self): + stripped = strip_invalid_mxid_characters("test@") + self.assertEqual(stripped, "test", stripped) diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index 508aeba078..a298cc0fd3 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py
@@ -17,6 +17,7 @@ """ Utilities for running the unit tests """ +from asyncio import Future from typing import Any, Awaitable, TypeVar TV = TypeVar("TV") @@ -38,6 +39,12 @@ def get_awaitable_result(awaitable: Awaitable[TV]) -> TV: raise Exception("awaitable has not yet completed") -async def make_awaitable(result: Any): - """Create an awaitable that just returns a result.""" - return result +def make_awaitable(result: Any) -> Awaitable[Any]: + """ + Makes an awaitable, suitable for mocking an `async` function. + This uses Futures as they can be awaited multiple times so can be returned + to multiple callers. + """ + future = Future() # type: ignore + future.set_result(result) + return future diff --git a/tests/unittest.py b/tests/unittest.py
index 3cb55a7e96..128dd4e19c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py
@@ -353,6 +353,7 @@ class HomeserverTestCase(TestCase): request: Type[T] = SynapseRequest, shorthand: bool = True, federation_auth_origin: str = None, + content_is_form: bool = False, ) -> Tuple[T, FakeChannel]: """ Create a SynapseRequest at the path using the method and containing the @@ -368,6 +369,8 @@ class HomeserverTestCase(TestCase): with the usual REST API path, if it doesn't contain it. federation_auth_origin (bytes|None): if set to not-None, we will add a fake Authorization header pretenting to be the given server name. + content_is_form: Whether the content is URL encoded form data. Adds the + 'Content-Type': 'application/x-www-form-urlencoded' header. Returns: Tuple[synapse.http.site.SynapseRequest, channel] @@ -384,6 +387,7 @@ class HomeserverTestCase(TestCase): request, shorthand, federation_auth_origin, + content_is_form, ) def render(self, request): diff --git a/tests/utils.py b/tests/utils.py
index 4673872f88..867cf88977 100644 --- a/tests/utils.py +++ b/tests/utils.py
@@ -173,6 +173,8 @@ def default_config(name, parse=False): "update_user_directory": False, "caches": {"global_factor": 1}, "listeners": [{"port": 0, "type": "http"}], + # Enable encryption by default in private rooms + "encryption_enabled_by_default_for_room_type": "invite", } if parse: diff --git a/tox.ini b/tox.ini
index df473bd234..6329ba286a 100644 --- a/tox.ini +++ b/tox.ini
@@ -113,14 +113,14 @@ commands = [testenv:packaging] skip_install=True deps = - check-manifest + check-manifest==0.41 commands = check-manifest [testenv:check_codestyle] skip_install = True deps = - flake8 + flake8==3.8.3 flake8-comprehensions # We pin so that our tests don't start failing on new releases of black. black==19.10b0 @@ -138,7 +138,8 @@ commands = /bin/sh -c "isort -c --df --sp setup.cfg synapse tests scripts-dev sc skip_install = True deps = towncrier>=18.6.0rc1 commands = - python -m towncrier.check --compare-with=origin/develop + python -m towncrier.check --compare-with=origin/dinsic +basepython = python3.6 [testenv:check-sampleconfig] commands = {toxinidir}/scripts-dev/generate_sample_config --check
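
For illustration (this sketch is not part of the commit): the rewrite of make_awaitable in tests/test_utils/__init__.py is what allows the many side_effect=lambda ...: make_awaitable(...) mocks elsewhere in this diff to be collapsed to return_value=make_awaitable(...). A coroutine can only be awaited once, while a Future whose result is already set can be awaited repeatedly, so a single mocked value survives multiple calls. The sketch below is a minimal, self-contained version of that pattern; DummyStore and its get_count method are hypothetical stand-ins for the storage methods patched in the tests above, and the helper is repeated here only so the sketch runs on its own.

import asyncio
from asyncio import Future
from typing import Any, Awaitable
from unittest.mock import Mock


def make_awaitable(result: Any) -> Awaitable[Any]:
    # Mirrors the helper added in tests/test_utils/__init__.py: a completed
    # Future can be awaited any number of times, unlike a coroutine.
    future: Future = Future()
    future.set_result(result)
    return future


class DummyStore:
    # Hypothetical async method standing in for the store methods mocked above.
    async def get_count(self) -> int:
        return 0


async def check() -> None:
    store = DummyStore()
    # Patch the async method so every call "returns" 1000 when awaited.
    store.get_count = Mock(return_value=make_awaitable(1000))

    assert await store.get_count() == 1000
    # Awaiting the same mocked result a second time still works, which is why
    # the per-call side_effect lambdas in the old tests are no longer needed.
    assert await store.get_count() == 1000


asyncio.run(check())

In the tests themselves the same idea appears as, for example, self.store.get_monthly_active_count = Mock(return_value=make_awaitable(1000)).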