-rw-r--r--  .buildkite/pipeline.yml | 310
-rw-r--r--  .buildkite/worker-blacklist | 33
-rw-r--r--  AUTHORS.rst | 3
-rw-r--r--  MANIFEST.in | 9
-rw-r--r--  changelog.d/1.feature | 1
-rw-r--r--  changelog.d/10.bugfix | 1
-rw-r--r--  changelog.d/11.feature | 1
-rw-r--r--  changelog.d/12.feature | 1
-rw-r--r--  changelog.d/13.feature | 1
-rw-r--r--  changelog.d/14.feature | 1
-rw-r--r--  changelog.d/15.misc | 1
-rw-r--r--  changelog.d/17.misc | 1
-rw-r--r--  changelog.d/18.feature | 1
-rw-r--r--  changelog.d/19.feature | 1
-rw-r--r--  changelog.d/2.bugfix | 1
-rw-r--r--  changelog.d/20.bugfix | 1
-rw-r--r--  changelog.d/21.bugfix | 1
-rw-r--r--  changelog.d/3.bugfix | 1
-rw-r--r--  changelog.d/4.bugfix | 1
-rw-r--r--  changelog.d/5.bugfix | 1
-rw-r--r--  changelog.d/5083.feature | 1
-rw-r--r--  changelog.d/5098.misc | 1
-rw-r--r--  changelog.d/5214.feature | 1
-rw-r--r--  changelog.d/5416.misc | 1
-rw-r--r--  changelog.d/5420.feature | 1
-rw-r--r--  changelog.d/5610.feature | 1
-rw-r--r--  changelog.d/5702.bugfix | 1
-rw-r--r--  changelog.d/5760.feature | 1
-rw-r--r--  changelog.d/6.bugfix | 1
-rw-r--r--  changelog.d/6349.feature | 1
-rw-r--r--  changelog.d/6377.bugfix | 1
-rw-r--r--  changelog.d/6385.bugfix | 1
-rw-r--r--  changelog.d/6394.feature | 1
-rw-r--r--  changelog.d/6411.feature | 1
-rw-r--r--  changelog.d/6453.feature | 1
-rw-r--r--  changelog.d/6486.bugfix | 1
-rw-r--r--  changelog.d/6496.misc | 1
-rw-r--r--  changelog.d/6502.removal | 1
-rw-r--r--  changelog.d/6504.misc | 1
-rw-r--r--  changelog.d/6505.misc | 1
-rw-r--r--  changelog.d/6506.misc | 1
-rw-r--r--  changelog.d/6510.misc | 1
-rw-r--r--  changelog.d/6511.misc | 1
-rw-r--r--  changelog.d/6512.misc | 1
-rw-r--r--  changelog.d/6514.bugfix | 1
-rw-r--r--  changelog.d/6515.misc | 1
-rw-r--r--  changelog.d/6517.misc | 1
-rw-r--r--  changelog.d/6522.bugfix | 1
-rw-r--r--  changelog.d/6534.misc | 1
-rw-r--r--  changelog.d/6537.misc | 1
-rw-r--r--  changelog.d/6538.misc | 1
-rw-r--r--  changelog.d/6541.doc | 1
-rw-r--r--  changelog.d/9.misc | 1
-rw-r--r--  contrib/systemd/README.md | 17
-rw-r--r--  docs/admin_api/shutdown_room.md | 72
-rw-r--r--  docs/saml_mapping_providers.md | 77
-rw-r--r--  docs/sample_config.yaml | 253
-rw-r--r--  docs/sphinx/conf.py | 8
-rw-r--r--  docs/workers.md | 14
-rw-r--r--  mypy.ini | 2
-rw-r--r--  res/templates-dinsic/mail-Vector.css | 7
-rw-r--r--  res/templates-dinsic/mail.css | 156
-rw-r--r--  res/templates-dinsic/notif.html | 45
-rw-r--r--  res/templates-dinsic/notif.txt | 16
-rw-r--r--  res/templates-dinsic/notif_mail.html | 55
-rw-r--r--  res/templates-dinsic/notif_mail.txt | 10
-rw-r--r--  res/templates-dinsic/room.html | 33
-rw-r--r--  res/templates-dinsic/room.txt | 9
-rwxr-xr-x  scripts-dev/check-newsfragment | 6
-rwxr-xr-x  scripts-dev/make_full_schema.sh | 184
-rwxr-xr-x  scripts-dev/update_database | 33
-rw-r--r--  synapse/api/auth.py | 11
-rw-r--r--  synapse/api/constants.py | 1
-rw-r--r--  synapse/api/errors.py | 22
-rw-r--r--  synapse/app/admin_cmd.py | 5
-rw-r--r--  synapse/app/appservice.py | 5
-rw-r--r--  synapse/app/client_reader.py | 5
-rw-r--r--  synapse/app/event_creator.py | 5
-rw-r--r--  synapse/app/federation_reader.py | 5
-rw-r--r--  synapse/app/federation_sender.py | 5
-rw-r--r--  synapse/app/frontend_proxy.py | 5
-rw-r--r--  synapse/app/homeserver.py | 13
-rw-r--r--  synapse/app/media_repository.py | 5
-rw-r--r--  synapse/app/pusher.py | 5
-rw-r--r--  synapse/app/synchrotron.py | 5
-rw-r--r--  synapse/app/user_dir.py | 5
-rw-r--r--  synapse/appservice/__init__.py | 2
-rw-r--r--  synapse/config/_base.py | 3
-rw-r--r--  synapse/config/emailconfig.py | 3
-rw-r--r--  synapse/config/password.py | 38
-rw-r--r--  synapse/config/ratelimiting.py | 12
-rw-r--r--  synapse/config/registration.py | 75
-rw-r--r--  synapse/config/repository.py | 30
-rw-r--r--  synapse/config/saml2_config.py | 186
-rw-r--r--  synapse/config/server.py | 89
-rw-r--r--  synapse/config/user_directory.py | 9
-rw-r--r--  synapse/event_auth.py | 8
-rw-r--r--  synapse/events/spamcheck.py | 63
-rw-r--r--  synapse/federation/federation_client.py | 88
-rw-r--r--  synapse/federation/federation_server.py | 15
-rw-r--r--  synapse/federation/transport/client.py | 33
-rw-r--r--  synapse/federation/transport/server.py | 32
-rw-r--r--  synapse/groups/groups_server.py | 5
-rw-r--r--  synapse/handlers/account_data.py | 14
-rw-r--r--  synapse/handlers/account_validity.py | 112
-rw-r--r--  synapse/handlers/deactivate_account.py | 4
-rw-r--r--  synapse/handlers/e2e_keys.py | 35
-rw-r--r--  synapse/handlers/federation.py | 137
-rw-r--r--  synapse/handlers/identity.py | 193
-rw-r--r--  synapse/handlers/initial_sync.py | 115
-rw-r--r--  synapse/handlers/message.py | 5
-rw-r--r--  synapse/handlers/pagination.py | 27
-rw-r--r--  synapse/handlers/password_policy.py | 93
-rw-r--r--  synapse/handlers/profile.py | 219
-rw-r--r--  synapse/handlers/register.py | 78
-rw-r--r--  synapse/handlers/room.py | 37
-rw-r--r--  synapse/handlers/room_member.py | 105
-rw-r--r--  synapse/handlers/saml_handler.py | 198
-rw-r--r--  synapse/handlers/search.py | 34
-rw-r--r--  synapse/handlers/set_password.py | 6
-rw-r--r--  synapse/logging/_terse_json.py | 2
-rw-r--r--  synapse/logging/context.py | 14
-rw-r--r--  synapse/rest/__init__.py | 2
-rw-r--r--  synapse/rest/client/v1/profile.py | 35
-rw-r--r--  synapse/rest/client/v1/room.py | 3
-rw-r--r--  synapse/rest/client/v2_alpha/account.py | 220
-rw-r--r--  synapse/rest/client/v2_alpha/account_data.py | 9
-rw-r--r--  synapse/rest/client/v2_alpha/password_policy.py | 58
-rw-r--r--  synapse/rest/client/v2_alpha/register.py | 204
-rw-r--r--  synapse/rest/client/v2_alpha/user_directory.py | 98
-rw-r--r--  synapse/rulecheck/__init__.py | 0
-rw-r--r--  synapse/rulecheck/domain_rule_checker.py | 181
-rw-r--r--  synapse/server.py | 18
-rw-r--r--  synapse/state/__init__.py | 3
-rw-r--r--  synapse/storage/data_stores/main/appservice.py | 2
-rw-r--r--  synapse/storage/data_stores/main/client_ips.py | 8
-rw-r--r--  synapse/storage/data_stores/main/end_to_end_keys.py | 217
-rw-r--r--  synapse/storage/data_stores/main/events_worker.py | 97
-rw-r--r--  synapse/storage/data_stores/main/profile.py | 96
-rw-r--r--  synapse/storage/data_stores/main/registration.py | 22
-rw-r--r--  synapse/storage/data_stores/main/room.py | 23
-rw-r--r--  synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql | 29
-rw-r--r--  synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres | 15
-rw-r--r--  synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite | 4
-rw-r--r--  synapse/storage/data_stores/main/schema/full_schemas/README.md | 13
-rw-r--r--  synapse/storage/data_stores/main/schema/full_schemas/README.txt | 19
-rw-r--r--  synapse/storage/data_stores/main/search.py | 19
-rw-r--r--  synapse/storage/data_stores/main/state.py | 18
-rw-r--r--  synapse/storage/schema/delta/48/profiles_batch.sql | 36
-rw-r--r--  synapse/storage/schema/delta/50/profiles_deactivated_users.sql | 23
-rw-r--r--  synapse/storage/schema/delta/55/profile_replication_status_index.sql | 17
-rw-r--r--  synapse/storage/schema/delta/55/room_retention.sql (renamed from synapse/storage/data_stores/main/schema/delta/56/room_retention.sql) | 0
-rw-r--r--  synapse/streams/events.py | 4
-rw-r--r--  synapse/third_party_rules/access_rules.py | 586
-rw-r--r--  synapse/types.py | 15
-rw-r--r--  synapse/util/caches/descriptors.py | 2
-rw-r--r--  synapse/util/caches/snapshot_cache.py | 94
-rw-r--r--  synapse/util/stringutils.py | 14
-rw-r--r--  synapse/util/threepids.py | 40
-rw-r--r--  sytest-blacklist | 21
-rw-r--r--  tests/handlers/test_e2e_keys.py | 8
-rw-r--r--  tests/handlers/test_identity.py | 115
-rw-r--r--  tests/handlers/test_profile.py | 9
-rw-r--r--  tests/handlers/test_register.py | 21
-rw-r--r--  tests/handlers/test_stats.py | 6
-rw-r--r--  tests/rest/client/test_identity.py | 147
-rw-r--r--  tests/rest/client/test_retention.py | 2
-rw-r--r--  tests/rest/client/test_room_access_rules.py | 726
-rw-r--r--  tests/rest/client/v1/test_profile.py | 1
-rw-r--r--  tests/rest/client/v2_alpha/test_password_policy.py | 177
-rw-r--r--  tests/rest/client/v2_alpha/test_register.py | 183
-rw-r--r--  tests/rulecheck/__init__.py | 14
-rw-r--r--  tests/rulecheck/test_domainrulecheck.py | 334
-rw-r--r--  tests/storage/test_client_ips.py | 49
-rw-r--r--  tests/storage/test_keys.py | 15
-rw-r--r--  tests/storage/test_profile.py | 8
-rw-r--r--  tests/test_federation.py | 14
-rw-r--r--  tests/test_types.py | 41
-rw-r--r--  tests/util/test_logcontext.py | 24
-rw-r--r--  tests/util/test_snapshot_cache.py | 63
-rw-r--r--  tests/utils.py | 2
-rw-r--r--  tox.ini | 18
182 files changed, 6949 insertions(+), 915 deletions(-)
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
new file mode 100644

index 0000000000..ff05a7428b
--- /dev/null
+++ b/.buildkite/pipeline.yml
@@ -0,0 +1,310 @@
+env:
+  COVERALLS_REPO_TOKEN: wsJWOby6j0uCYFiCes3r0XauxO27mx8lD
+
+steps:
+  - command:
+      - "python -m pip install tox"
+      - "tox -e check_codestyle"
+    label: "\U0001F9F9 Check Style"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          mount-buildkite-agent: false
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e packaging"
+    label: "\U0001F9F9 packaging"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          mount-buildkite-agent: false
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e check_isort"
+    label: "\U0001F9F9 isort"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          mount-buildkite-agent: false
+
+  - command:
+      - "python -m pip install tox"
+      - "scripts-dev/check-newsfragment"
+    label: ":newspaper: Newsfile"
+    branches: "!master !develop !release-*"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          propagate-environment: true
+          mount-buildkite-agent: false
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e check-sampleconfig"
+    label: "\U0001F9F9 check-sample-config"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          mount-buildkite-agent: false
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e mypy"
+    label: ":mypy: mypy"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.5"
+          mount-buildkite-agent: false
+
+  - wait
+
+  - command:
+      - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
+      - "python3.5 -m pip install tox"
+      - "tox -e py35-old,combine"
+    label: ":python: 3.5 / SQLite / Old Deps"
+    env:
+      TRIAL_FLAGS: "-j 2"
+      LANG: "C.UTF-8"
+    plugins:
+      - docker#v3.0.1:
+          image: "ubuntu:xenial"  # We use xenial to get an old sqlite and python
+          workdir: "/src"
+          mount-buildkite-agent: false
+          propagate-environment: true
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py35,combine"
+    label: ":python: 3.5 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.5"
+          workdir: "/src"
+          mount-buildkite-agent: false
+          propagate-environment: true
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py36,combine"
+    label: ":python: 3.6 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.6"
+          workdir: "/src"
+          mount-buildkite-agent: false
+          propagate-environment: true
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - command:
+      - "python -m pip install tox"
+      - "tox -e py37,combine"
+    label: ":python: 3.7 / SQLite"
+    env:
+      TRIAL_FLAGS: "-j 2"
+    plugins:
+      - docker#v3.0.1:
+          image: "python:3.7"
+          workdir: "/src"
+          mount-buildkite-agent: false
+          propagate-environment: true
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: ":python: 3.5 / :postgres: 9.5"
+    agents:
+      queue: "medium"
+    env:
+      TRIAL_FLAGS: "-j 8"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,combine'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py35.pg95.yaml
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: ":python: 3.7 / :postgres: 9.5"
+    agents:
+      queue: "medium"
+    env:
+      TRIAL_FLAGS: "-j 8"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py37.pg95.yaml
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: ":python: 3.7 / :postgres: 11"
+    agents:
+      queue: "medium"
+    env:
+      TRIAL_FLAGS: "-j 8"
+    command:
+      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'"
+    plugins:
+      - docker-compose#v2.1.0:
+          run: testenv
+          config:
+            - .buildkite/docker-compose.py37.pg11.yaml
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / SQLite / Monolith"
+    agents:
+      queue: "medium"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash /synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+          always-pull: true
+          workdir: "/src"
+          entrypoint: ["/bin/sh", "-e", "-c"]
+          mount-buildkite-agent: false
+          volumes: ["./logs:/logs"]
+      - artifacts#v1.2.0:
+          upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+      - matrix-org/annotate:
+          path: "logs/annotate.md"
+          class: "error"
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
+    agents:
+      queue: "xlarge"
+    env:
+      POSTGRES: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash /synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:dinsic-py3"
+          propagate-environment: true
+          always-pull: true
+          workdir: "/src"
+          entrypoint: ["/bin/sh", "-e", "-c"]
+          mount-buildkite-agent: false
+          volumes: ["./logs:/logs"]
+      - artifacts#v1.2.0:
+          upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+      - matrix-org/annotate:
+          path: "logs/annotate.md"
+          class: "error"
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3 / :postgres: 9.6 / Workers"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+      WORKERS: "1"
+      BLACKLIST: "synapse-blacklist-with-workers"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
+      - "bash /synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:dinsic-py3"
+          propagate-environment: true
+          always-pull: true
+          workdir: "/src"
+          entrypoint: ["/bin/sh", "-e", "-c"]
+          mount-buildkite-agent: false
+          volumes: ["./logs:/logs"]
+      - artifacts#v1.2.0:
+          upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+      - matrix-org/annotate:
+          path: "logs/annotate.md"
+          class: "error"
+      - matrix-org/coveralls#v1.0:
+          parallel: "true"
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - wait: ~
+    continue_on_failure: true
+
+  - label: Trigger webhook
+    command: "curl -k https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN -d \"payload[build_num]=$BUILDKITE_BUILD_NUMBER&payload[status]=done\""
diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
index 7950d19db3..158ab79154 100644
--- a/.buildkite/worker-blacklist
+++ b/.buildkite/worker-blacklist
@@ -34,33 +34,8 @@ Device list doesn't change if remote server is down
 Remote servers cannot set power levels in rooms without existing powerlevels
 Remote servers should reject attempts by non-creators to set the power levels
 
-# new failures as of https://github.com/matrix-org/sytest/pull/753
-GET /rooms/:room_id/messages returns a message
-GET /rooms/:room_id/messages lazy loads members correctly
-Read receipts are sent as events
-Only original members of the room can see messages from erased users
-Device deletion propagates over federation
-If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes
-Changing user-signing key notifies local users
-Newly updated tags appear in an incremental v2 /sync
+# https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5
 Server correctly handles incoming m.device_list_update
-Local device key changes get to remote servers with correct prev_id
-AS-ghosted users can use rooms via AS
-Ghost user must register before joining room
-Test that a message is pushed
-Invites are pushed
-Rooms with aliases are correctly named in pushed
-Rooms with names are correctly named in pushed
-Rooms with canonical alias are correctly named in pushed
-Rooms with many users are correctly pushed
-Don't get pushed for rooms you've muted
-Rejected events are not pushed
-Test that rejected pushers are removed.
-Events come down the correct room
-
-# https://buildkite.com/matrix-dot-org/sytest/builds/326#cca62404-a88a-4fcb-ad41-175fd3377603
-Presence changes to UNAVAILABLE are reported to remote room members
-If remote user leaves room, changes device and rejoins we see update in sync
-uploading self-signing key notifies over federation
-Inbound federation can receive redacted events
-Outbound federation can request missing events
+
+# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
+Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
diff --git a/AUTHORS.rst b/AUTHORS.rst
index b8b31a5b47..014f16d4a2 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -46,3 +46,6 @@ Joseph Weston <joseph at weston.cloud>
 
 Benjamin Saunders <ben.e.saunders at gmail dot com>
  * Documentation improvements
+
+Werner Sembach <werner.sembach at fau dot de>
+ * Automatically remove a group/community when it is empty
diff --git a/MANIFEST.in b/MANIFEST.in
index 156d6f04f7..5eb8e62d34 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,5 @@
 include synctl
+include sytest-blacklist
 include LICENSE
 include VERSION
 include *.rst
@@ -50,3 +51,11 @@ prune docker
 prune mypy.ini
 prune snap
 prune stubs
+
+exclude jenkins*
+recursive-exclude jenkins *.sh
+
+# FIXME: we shouldn't have these templates here
+recursive-include res/templates-dinsic *.css
+recursive-include res/templates-dinsic *.html
+recursive-include res/templates-dinsic *.txt
diff --git a/changelog.d/1.feature b/changelog.d/1.feature
new file mode 100644
index 0000000000..845642e445
--- /dev/null
+++ b/changelog.d/1.feature
@@ -0,0 +1 @@
+Forbid changing the name, avatar or topic of a direct room.
diff --git a/changelog.d/10.bugfix b/changelog.d/10.bugfix
new file mode 100644
index 0000000000..51f89f46dd
--- /dev/null
+++ b/changelog.d/10.bugfix
@@ -0,0 +1 @@
+Don't apply retention policy based filtering on state events.
diff --git a/changelog.d/11.feature b/changelog.d/11.feature
new file mode 100644
index 0000000000..362e4b1efd
--- /dev/null
+++ b/changelog.d/11.feature
@@ -0,0 +1 @@
+Allow server admins to configure a custom global rate-limit for third party invites.
\ No newline at end of file
diff --git a/changelog.d/12.feature b/changelog.d/12.feature
new file mode 100644
index 0000000000..8e6e7a28af
--- /dev/null
+++ b/changelog.d/12.feature
@@ -0,0 +1 @@
+Add `/user/:user_id/info` CS servlet to give user deactivated/expired information.
\ No newline at end of file
diff --git a/changelog.d/13.feature b/changelog.d/13.feature
new file mode 100644
index 0000000000..c2d2e93abf
--- /dev/null
+++ b/changelog.d/13.feature
@@ -0,0 +1 @@
+Hide expired users from the user directory, and optionally re-add them on renewal.
\ No newline at end of file
diff --git a/changelog.d/14.feature b/changelog.d/14.feature
new file mode 100644
index 0000000000..020d0bac1e
--- /dev/null
+++ b/changelog.d/14.feature
@@ -0,0 +1 @@
+User displaynames now have capitalised letters after - symbols.
\ No newline at end of file
diff --git a/changelog.d/15.misc b/changelog.d/15.misc
new file mode 100644
index 0000000000..4cc4a5175f
--- /dev/null
+++ b/changelog.d/15.misc
@@ -0,0 +1 @@
+Fix the ordering on `scripts/generate_signing_key.py`'s import statement.
diff --git a/changelog.d/17.misc b/changelog.d/17.misc
new file mode 100644
index 0000000000..58120ab5c7
--- /dev/null
+++ b/changelog.d/17.misc
@@ -0,0 +1 @@
+Blacklist some flaky sytests until they're fixed.
\ No newline at end of file
diff --git a/changelog.d/18.feature b/changelog.d/18.feature
new file mode 100644
index 0000000000..f5aa29a6e8
--- /dev/null
+++ b/changelog.d/18.feature
@@ -0,0 +1 @@
+Add option `limit_profile_requests_to_known_users` to restrict profile queries to users who share a room with the requester.
\ No newline at end of file
diff --git a/changelog.d/19.feature b/changelog.d/19.feature
new file mode 100644
index 0000000000..95a44a4a89
--- /dev/null
+++ b/changelog.d/19.feature
@@ -0,0 +1 @@
+Add `max_avatar_size` and `allowed_avatar_mimetypes` to restrict the size of user avatars and their file type respectively.
\ No newline at end of file
diff --git a/changelog.d/2.bugfix b/changelog.d/2.bugfix
new file mode 100644
index 0000000000..4fe5691468
--- /dev/null
+++ b/changelog.d/2.bugfix
@@ -0,0 +1 @@
+Don't treat 3PID revocation as a new 3PID invite.
diff --git a/changelog.d/20.bugfix b/changelog.d/20.bugfix
new file mode 100644
index 0000000000..8ba53c28f9
--- /dev/null
+++ b/changelog.d/20.bugfix
@@ -0,0 +1 @@
+Validate `client_secret` parameter against the regex provided by the C-S spec.
\ No newline at end of file
diff --git a/changelog.d/21.bugfix b/changelog.d/21.bugfix
new file mode 100644
index 0000000000..630d7812f7
--- /dev/null
+++ b/changelog.d/21.bugfix
@@ -0,0 +1 @@
+Fix resetting user passwords via a phone number.
diff --git a/changelog.d/3.bugfix b/changelog.d/3.bugfix
new file mode 100644
index 0000000000..cc4bcefa80
--- /dev/null
+++ b/changelog.d/3.bugfix
@@ -0,0 +1 @@
+Fix encoding on password reset HTML responses in Python 2.
diff --git a/changelog.d/4.bugfix b/changelog.d/4.bugfix
new file mode 100644
index 0000000000..fe717920a6
--- /dev/null
+++ b/changelog.d/4.bugfix
@@ -0,0 +1 @@
+Fix handling of filtered strings in Python 3.
diff --git a/changelog.d/5.bugfix b/changelog.d/5.bugfix
new file mode 100644
index 0000000000..53f57f46ca
--- /dev/null
+++ b/changelog.d/5.bugfix
@@ -0,0 +1 @@
+Fix room retention policy management in worker mode.
diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature
new file mode 100644
index 0000000000..2ffdd37eef
--- /dev/null
+++ b/changelog.d/5083.feature
@@ -0,0 +1 @@
+Add `auth_profile_reqs` option to require an access_token to GET /profile endpoints on the CS API.
diff --git a/changelog.d/5098.misc b/changelog.d/5098.misc
new file mode 100644
index 0000000000..9cd83bf226
--- /dev/null
+++ b/changelog.d/5098.misc
@@ -0,0 +1 @@
+Add workarounds for pep-517 install errors.
diff --git a/changelog.d/5214.feature b/changelog.d/5214.feature
new file mode 100644
index 0000000000..6c0f15c901
--- /dev/null
+++ b/changelog.d/5214.feature
@@ -0,0 +1 @@
+Allow server admins to define and enforce a password policy (MSC2000).
diff --git a/changelog.d/5416.misc b/changelog.d/5416.misc
new file mode 100644
index 0000000000..155e8c7cd3
--- /dev/null
+++ b/changelog.d/5416.misc
@@ -0,0 +1 @@
+Add unique index to the profile_replication_status table.
diff --git a/changelog.d/5420.feature b/changelog.d/5420.feature
new file mode 100644
index 0000000000..745864b903
--- /dev/null
+++ b/changelog.d/5420.feature
@@ -0,0 +1 @@
+Add configuration option to hide new users from the user directory.
diff --git a/changelog.d/5610.feature b/changelog.d/5610.feature
new file mode 100644
index 0000000000..b99514f97e
--- /dev/null
+++ b/changelog.d/5610.feature
@@ -0,0 +1 @@
+Implement new custom event rules for power levels.
diff --git a/changelog.d/5702.bugfix b/changelog.d/5702.bugfix
new file mode 100644
index 0000000000..43b6e39b13
--- /dev/null
+++ b/changelog.d/5702.bugfix
@@ -0,0 +1 @@
+Fix 3PID invite to invite association detection in the Tchap room access rules.
diff --git a/changelog.d/5760.feature b/changelog.d/5760.feature
new file mode 100644
index 0000000000..90302d793e
--- /dev/null
+++ b/changelog.d/5760.feature
@@ -0,0 +1 @@
+Force the access rule to be "restricted" if the join rule is "public".
diff --git a/changelog.d/6.bugfix b/changelog.d/6.bugfix
new file mode 100644
index 0000000000..43ab65cc95
--- /dev/null
+++ b/changelog.d/6.bugfix
@@ -0,0 +1 @@
+Don't forbid membership events whose membership isn't 'join' or 'invite' in restricted rooms, so that users who got into these rooms before the access rules started to be enforced can leave them.
diff --git a/changelog.d/6349.feature b/changelog.d/6349.feature
new file mode 100644
index 0000000000..56c4fbf78e
--- /dev/null
+++ b/changelog.d/6349.feature
@@ -0,0 +1 @@
+Implement v2 APIs for the `send_join` and `send_leave` federation endpoints (as described in [MSC1802](https://github.com/matrix-org/matrix-doc/pull/1802)).
diff --git a/changelog.d/6377.bugfix b/changelog.d/6377.bugfix
new file mode 100644
index 0000000000..ccda96962f
--- /dev/null
+++ b/changelog.d/6377.bugfix
@@ -0,0 +1 @@
+Prevent redacted events from being returned during message search.
\ No newline at end of file
diff --git a/changelog.d/6385.bugfix b/changelog.d/6385.bugfix
new file mode 100644
index 0000000000..7a2bc02170
--- /dev/null
+++ b/changelog.d/6385.bugfix
@@ -0,0 +1 @@
+Prevent error on trying to search an upgraded room when the server is not in the predecessor room.
\ No newline at end of file
diff --git a/changelog.d/6394.feature b/changelog.d/6394.feature
new file mode 100644
index 0000000000..1a0e8845ad
--- /dev/null
+++ b/changelog.d/6394.feature
@@ -0,0 +1 @@
+Add a develop script to generate full SQL schemas.
\ No newline at end of file
diff --git a/changelog.d/6411.feature b/changelog.d/6411.feature
new file mode 100644
index 0000000000..ebea4a208d
--- /dev/null
+++ b/changelog.d/6411.feature
@@ -0,0 +1 @@
+Allow custom SAML username mapping functionality through an external provider plugin.
\ No newline at end of file
diff --git a/changelog.d/6453.feature b/changelog.d/6453.feature
new file mode 100644
index 0000000000..e7bb801c6a
--- /dev/null
+++ b/changelog.d/6453.feature
@@ -0,0 +1 @@
+Automatically delete empty groups/communities.
diff --git a/changelog.d/6486.bugfix b/changelog.d/6486.bugfix
new file mode 100644
index 0000000000..b98c5a9ae5
--- /dev/null
+++ b/changelog.d/6486.bugfix
@@ -0,0 +1 @@
+Improve performance of looking up cross-signing keys.
diff --git a/changelog.d/6496.misc b/changelog.d/6496.misc
new file mode 100644
index 0000000000..19c6e926b8
--- /dev/null
+++ b/changelog.d/6496.misc
@@ -0,0 +1 @@
+Port synapse.handlers.initial_sync to async/await.
diff --git a/changelog.d/6502.removal b/changelog.d/6502.removal
new file mode 100644
index 0000000000..0b72261d58
--- /dev/null
+++ b/changelog.d/6502.removal
@@ -0,0 +1 @@
+Remove redundant code from event authorisation implementation.
diff --git a/changelog.d/6504.misc b/changelog.d/6504.misc
new file mode 100644
index 0000000000..7c873459af
--- /dev/null
+++ b/changelog.d/6504.misc
@@ -0,0 +1 @@
+Port handlers.account_data and handlers.account_validity to async/await.
diff --git a/changelog.d/6505.misc b/changelog.d/6505.misc
new file mode 100644
index 0000000000..3a75b2d9dd
--- /dev/null
+++ b/changelog.d/6505.misc
@@ -0,0 +1 @@
+Make `make_deferred_yieldable` work with async/await.
diff --git a/changelog.d/6506.misc b/changelog.d/6506.misc
new file mode 100644
index 0000000000..99d7a70bcf
--- /dev/null
+++ b/changelog.d/6506.misc
@@ -0,0 +1 @@
+Remove `SnapshotCache` in favour of `ResponseCache`.
diff --git a/changelog.d/6510.misc b/changelog.d/6510.misc
new file mode 100644
index 0000000000..214f06539b
--- /dev/null
+++ b/changelog.d/6510.misc
@@ -0,0 +1 @@
+Change phone home stats to not assume there is a single database and report information about the database used by the main data store.
diff --git a/changelog.d/6511.misc b/changelog.d/6511.misc
new file mode 100644
index 0000000000..19ce435e68
--- /dev/null
+++ b/changelog.d/6511.misc
@@ -0,0 +1 @@
+Move database config from apps into HomeServer object.
diff --git a/changelog.d/6512.misc b/changelog.d/6512.misc
new file mode 100644
index 0000000000..37a8099eec
--- /dev/null
+++ b/changelog.d/6512.misc
@@ -0,0 +1 @@
+Silence mypy errors for files outside those specified.
diff --git a/changelog.d/6514.bugfix b/changelog.d/6514.bugfix
new file mode 100644
index 0000000000..6dc1985c24
--- /dev/null
+++ b/changelog.d/6514.bugfix
@@ -0,0 +1 @@
+Fix race which occasionally caused deleted devices to reappear.
diff --git a/changelog.d/6515.misc b/changelog.d/6515.misc
new file mode 100644
index 0000000000..a9c303ed1c
--- /dev/null
+++ b/changelog.d/6515.misc
@@ -0,0 +1 @@
+Clean up some logging when handling incoming events over federation.
diff --git a/changelog.d/6517.misc b/changelog.d/6517.misc
new file mode 100644
index 0000000000..c6ffed9952
--- /dev/null
+++ b/changelog.d/6517.misc
@@ -0,0 +1 @@
+Port some of FederationHandler to async/await.
\ No newline at end of file
diff --git a/changelog.d/6522.bugfix b/changelog.d/6522.bugfix
new file mode 100644
index 0000000000..ccda96962f
--- /dev/null
+++ b/changelog.d/6522.bugfix
@@ -0,0 +1 @@
+Prevent redacted events from being returned during message search.
\ No newline at end of file
diff --git a/changelog.d/6534.misc b/changelog.d/6534.misc
new file mode 100644
index 0000000000..7df6bb442a
--- /dev/null
+++ b/changelog.d/6534.misc
@@ -0,0 +1 @@
+Test more folders against mypy.
diff --git a/changelog.d/6537.misc b/changelog.d/6537.misc
new file mode 100644
index 0000000000..3543153584
--- /dev/null
+++ b/changelog.d/6537.misc
@@ -0,0 +1 @@
+Update `mypy` to new version.
diff --git a/changelog.d/6538.misc b/changelog.d/6538.misc
new file mode 100644
index 0000000000..cb4fd56948
--- /dev/null
+++ b/changelog.d/6538.misc
@@ -0,0 +1 @@
+Adjust the sytest blacklist for worker mode.
diff --git a/changelog.d/6541.doc b/changelog.d/6541.doc
new file mode 100644
index 0000000000..c20029edc0
--- /dev/null
+++ b/changelog.d/6541.doc
@@ -0,0 +1 @@
+Document the Room Shutdown Admin API.
\ No newline at end of file
diff --git a/changelog.d/9.misc b/changelog.d/9.misc
new file mode 100644
index 0000000000..24fd12c978
--- /dev/null
+++ b/changelog.d/9.misc
@@ -0,0 +1 @@
+Add SyTest to the BuildKite CI.
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
deleted file mode 100644
index 5d42b3464f..0000000000
--- a/contrib/systemd/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Setup Synapse with Systemd
-This is a setup for managing synapse with a user contributed systemd unit
-file. It provides a `matrix-synapse` systemd unit file that should be tailored
-to accommodate your installation in accordance with the installation
-instructions provided in [installation instructions](../../INSTALL.md).
-
-## Setup
-1. Under the service section, ensure the `User` variable matches which user
-you installed synapse under and wish to run it as.
-2. Under the service section, ensure the `WorkingDirectory` variable matches
-where you have installed synapse.
-3. Under the service section, ensure the `ExecStart` variable matches the
-appropriate locations of your installation.
-4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
-5. Start Synapse: `sudo systemctl start matrix-synapse`
-6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
-7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
diff --git a/docs/admin_api/shutdown_room.md b/docs/admin_api/shutdown_room.md
new file mode 100644
index 0000000000..54ce1cd234
--- /dev/null
+++ b/docs/admin_api/shutdown_room.md
@@ -0,0 +1,72 @@
+# Shutdown room API
+
+Shuts down a room, preventing new joins, and automatically moves local users and room
+aliases to a new room. The new room will be created with the user specified by the
+`new_room_user_id` parameter as room administrator and will contain a message
+explaining what happened. Users invited to the new room will have power level
+-10 by default, and thus be unable to speak. The old room's power levels will be changed to
+disallow any further invites or joins.
+
+The local server will only have the power to move local users and room aliases to
+the new room. Users on other servers will be unaffected.
+
+## API
+
+You will need to authenticate with an access token for an admin user.
+
+### URL
+
+`POST /_synapse/admin/v1/shutdown_room/{room_id}`
+
+### URL Parameters
+
+* `room_id` - The ID of the room (e.g `!someroom:example.com`)
+
+### JSON Body Parameters
+
+* `new_room_user_id` - Required. A string representing the user ID of the user that will admin
+  the new room that all users in the old room will be moved to.
+* `room_name` - Optional. A string representing the name of the room that new users will be
+  invited to.
+* `message` - Optional. A string containing the first message that will be sent as
+  `new_room_user_id` in the new room. Ideally this will clearly convey why the
+  original room was shut down.
+
+If not specified, the default value of `room_name` is "Content Violation
+Notification". The default value of `message` is "Sharing illegal content on
+this server is not permitted and rooms in violation will be blocked."
+
+### Response Parameters
+
+* `kicked_users` - An integer number representing the number of users that
+  were kicked.
+* `failed_to_kick_users` - An integer number representing the number of users
+  that were not kicked.
+* `local_aliases` - An array of strings representing the local aliases that were migrated from
+  the old room to the new.
+* `new_room_id` - A string representing the room ID of the new room.
+
+## Example
+
+Request:
+
+```
+POST /_synapse/admin/v1/shutdown_room/!somebadroom%3Aexample.com
+
+{
+    "new_room_user_id": "@someuser:example.com",
+    "room_name": "Content Violation Notification",
+    "message": "Bad Room has been shutdown due to content violations on this server. Please review our Terms of Service."
+}
+```
+
+Response:
+
+```
+{
+    "kicked_users": 5,
+    "failed_to_kick_users": 0,
+    "local_aliases": ["#badroom:example.com", "#evilsaloon:example.com"],
+    "new_room_id": "!newroomid:example.com"
+}
+```
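For illustration, a minimal sketch of driving this API from Python follows. The homeserver URL, admin token, and room ID are placeholders, not values from this patch:

```python
import requests

HOMESERVER = "https://matrix.example.com"  # assumption: your homeserver's base URL
ADMIN_TOKEN = "MDAxOmV4YW1wbGU"            # assumption: an access token for an admin user
ROOM_ID = "!somebadroom:example.com"

# The room ID must be URL-encoded since it contains ':' and '!'.
resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/shutdown_room/{requests.utils.quote(ROOM_ID, safe='')}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={
        "new_room_user_id": "@someuser:example.com",
        "room_name": "Content Violation Notification",
        "message": "This room has been shut down. Please review our Terms of Service.",
    },
)
resp.raise_for_status()
body = resp.json()
print("new room:", body["new_room_id"], "- kicked:", body["kicked_users"])
```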
diff --git a/docs/saml_mapping_providers.md b/docs/saml_mapping_providers.md
new file mode 100644
index 0000000000..92f2380488
--- /dev/null
+++ b/docs/saml_mapping_providers.md
@@ -0,0 +1,77 @@
+# SAML Mapping Providers
+
+A SAML mapping provider is a Python class (loaded via a Python module) that
+works out how to map attributes of a SAML response object to Matrix-specific
+user attributes. Details such as user ID localpart, displayname, and even avatar
+URLs are all things that can be mapped from talking to an SSO service.
+
+As an example, an SSO service may return the email address
+"john.smith@example.com" for a user, whereas Synapse will need to figure out how
+to turn that into a displayname when creating a Matrix user for this individual.
+It may choose `John Smith`, or `Smith, John [Example.com]` or any number of
+variations. As each Synapse configuration may want something different, this is
+where SAML mapping providers come into play.
+
+## Enabling Providers
+
+External mapping providers are provided to Synapse in the form of an external
+Python module. Retrieve this module from [PyPI](https://pypi.org) or elsewhere,
+then tell Synapse where to look for the handler class by editing the
+`saml2_config.user_mapping_provider.module` config option.
+
+`saml2_config.user_mapping_provider.config` allows you to provide custom
+configuration options to the module. Check with the module's documentation for
+what options it provides (if any). The options listed by default are for the
+user mapping provider built in to Synapse. If using a custom module, you should
+comment these options out and use those specified by the module instead.
+
+## Building a Custom Mapping Provider
+
+A custom mapping provider must specify the following methods:
+
+* `__init__(self, parsed_config)`
+   - Arguments:
+     - `parsed_config` - A configuration object that is the return value of the
+       `parse_config` method. You should set any configuration options needed by
+       the module here.
+* `saml_response_to_user_attributes(self, saml_response, failures)`
+   - Arguments:
+     - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
+       information from.
+     - `failures` - An `int` that represents the number of times the returned
+       mxid localpart mapping has failed. This should be used
+       to create a deduplicated mxid localpart which should be
+       returned instead. For example, if this method returns
+       `john.doe` as the value of `mxid_localpart` in the returned
+       dict, and that is already taken on the homeserver, this
+       method will be called again with the same parameters but
+       with failures=1. The method should then return a different
+       `mxid_localpart` value, such as `john.doe1`.
+   - This method must return a dictionary, which will then be used by Synapse
+     to build a new user. The following keys are allowed:
+     * `mxid_localpart` - Required. The mxid localpart of the new user.
+     * `displayname` - The displayname of the new user. If not provided, will default to
+       the value of `mxid_localpart`.
+* `parse_config(config)`
+   - This method should have the `@staticmethod` decoration.
+   - Arguments:
+     - `config` - A `dict` representing the parsed content of the
+       `saml2_config.user_mapping_provider.config` homeserver config option.
+       Runs on homeserver startup. Providers should extract any option values
+       they need here.
+   - Whatever is returned will be passed back to the user mapping provider module's
+     `__init__` method during construction.
+* `get_saml_attributes(config)`
+   - This method should have the `@staticmethod` decoration.
+   - Arguments:
+     - `config` - An object resulting from a call to `parse_config`.
+   - Returns a tuple of two sets. The first set equates to the SAML auth
+     response attributes that are required for the module to function, whereas
+     the second set consists of those attributes which can be used if available,
+     but are not necessary.
+
+## Synapse's Default Provider
+
+Synapse has a built-in SAML mapping provider if a custom provider isn't
+specified in the config. It is located at
+[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py).
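To make the interface above concrete, here is a minimal sketch of a custom provider. The class name and config option are invented for illustration; `saml_response.ava` is pysaml2's attribute/value dict on `AuthnResponse` objects:

```python
class MySamlMappingProvider:
    """A sketch of a provider implementing the interface described above."""

    def __init__(self, parsed_config):
        # parsed_config is whatever parse_config returned below.
        self._source_attribute = parsed_config["source_attribute"]

    @staticmethod
    def parse_config(config):
        # config is saml2_config.user_mapping_provider.config as a dict.
        return {"source_attribute": config.get("source_attribute", "uid")}

    @staticmethod
    def get_saml_attributes(config):
        # (required attributes, optional attributes)
        return {"uid"}, {"displayName"}

    def saml_response_to_user_attributes(self, saml_response, failures):
        # saml_response.ava maps attribute names to lists of values.
        localpart = saml_response.ava[self._source_attribute][0].lower()
        if failures:
            # Deduplicate by appending the failure count, e.g. john.doe1.
            localpart += str(failures)
        displayname = saml_response.ava.get("displayName", [None])[0]
        return {"mxid_localpart": localpart, "displayname": displayname}
```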
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 10664ae8f7..05cd1750e6 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -54,6 +54,13 @@ pid_file: DATADIR/homeserver.pid
 #
 #require_auth_for_profile_requests: true
 
+# Whether to require a user to share a room with another user in order
+# to retrieve their profile information. Only checked on Client-Server
+# requests. Profile requests from other servers should be checked by the
+# requesting server. Defaults to 'false'.
+#
+#limit_profile_requests_to_known_users: true
+
 # If set to 'true', removes the need for authentication to access the server's
 # public rooms directory through the client API, meaning that anyone can
 # query the room directory. Defaults to 'false'.
@@ -316,6 +323,74 @@ listeners:
 #
 #allow_per_room_profiles: false
 
+# Whether to show the users on this homeserver in the user directory. Defaults to
+# 'true'.
+#
+#show_users_in_user_directory: false
+
+# Message retention policy at the server level.
+#
+# Room admins and mods can define a retention period for their rooms using the
+# 'm.room.retention' state event, and server admins can cap this period by setting
+# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+#
+# If this feature is enabled, Synapse will regularly look for and purge events
+# which are older than the room's maximum retention period. Synapse will also
+# filter events received over federation so that events that should have been
+# purged are ignored and not stored again.
+#
+retention:
+  # The message retention policies feature is disabled by default. Uncomment the
+  # following line to enable it.
+  #
+  #enabled: true
+
+  # Default retention policy. If set, Synapse will apply it to rooms that lack the
+  # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+  # matter much because Synapse doesn't take it into account yet.
+  #
+  #default_policy:
+  #  min_lifetime: 1d
+  #  max_lifetime: 1y
+
+  # Retention policy limits. If set, a user won't be able to send a
+  # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+  # that's not within this range. This is especially useful in closed federations,
+  # in which server admins can make sure every federating server applies the same
+  # rules.
+  #
+  #allowed_lifetime_min: 1d
+  #allowed_lifetime_max: 1y
+
+  # Server admins can define the settings of the background jobs purging the
+  # events whose lifetime has expired under the 'purge_jobs' section.
+  #
+  # If no configuration is provided, a single job will be set up to delete expired
+  # events in every room daily.
+  #
+  # Each job's configuration defines which range of message lifetimes the job
+  # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+  # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+  # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+  # lower than or equal to 3 days. Both the minimum and the maximum value of a
+  # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+  # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+  # whose 'max_lifetime' is lower than or equal to three days.
+  #
+  # The rationale for this per-job configuration is that some rooms might have a
+  # retention policy with a low 'max_lifetime', where history needs to be purged
+  # of outdated messages on a very frequent basis (e.g. every 5min), but not want
+  # that purge to be performed by a job that's iterating over every room it knows,
+  # which would be quite heavy on the server.
+  #
+  #purge_jobs:
+  #  - shortest_max_lifetime: 1d
+  #    longest_max_lifetime: 3d
+  #    interval: 5m
+  #  - shortest_max_lifetime: 3d
+  #    longest_max_lifetime: 1y
+  #    interval: 24h
+
 # How long to keep redacted events in unredacted form in the database. After
 # this period redacted events get replaced with their redacted form in the DB.
 #
@@ -597,6 +672,8 @@ log_config: "CONFDIR/SERVERNAME.log.config"
 # - one for login that ratelimits login requests based on the account the
 #   client is attempting to log into, based on the amount of failed login
 #   attempts for this account.
+# - one that ratelimits third-party invite requests based on the account
+#   that's making the requests.
 # - one for ratelimiting redactions by room admins. If this is not explicitly
 #   set then it uses the same ratelimiting as per rc_message. This is useful
 #   to allow room admins to deal with abuse quickly.
@@ -622,6 +699,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
 #    per_second: 0.17
 #    burst_count: 3
 #
+#rc_third_party_invite:
+#  per_second: 0.2
+#  burst_count: 10
+#
 #rc_admin_redaction:
 #  per_second: 1
 #  burst_count: 50
@@ -693,6 +774,30 @@ uploads_path: "DATADIR/uploads"
 #
 #max_upload_size: 10M
 
+# The largest allowed size for a user avatar. If not defined, no
+# restriction will be imposed.
+#
+# Note that this only applies when an avatar is changed globally.
+# Per-room avatar changes are not affected. See allow_per_room_profiles
+# for disabling that functionality.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's local media repo.
+#
+#max_avatar_size: 10M
+
+# The mimetypes allowed for a user avatar. If not defined, no restriction will
+# be imposed.
+#
+# Note that this only applies when an avatar is changed globally.
+# Per-room avatar changes are not affected. See allow_per_room_profiles
+# for disabling that functionality.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's local media repo.
+#
+#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
 # Maximum number of pixels that will be thumbnailed
 #
 #max_image_pixels: 32M
@@ -936,9 +1041,32 @@ uploads_path: "DATADIR/uploads"
 #
 #disable_msisdn_registration: true
 
+# Derive the user's matrix ID from a type of 3PID used when registering.
+# This overrides any matrix ID the user proposes when calling /register.
+# The 3PID type should be present in registrations_require_3pid to avoid
+# users failing to register if they don't specify the right kind of 3pid.
+#
+#register_mxid_from_3pid: email
+
+# Uncomment to set the display name of new users to their email address,
+# rather than using the default heuristic.
+#
+#register_just_use_email_for_display_name: true
+
 # Mandate that users are only allowed to associate certain formats of
 # 3PIDs with accounts on this server.
 #
+# Use an Identity Server to establish which 3PIDs are allowed to register?
+# Overrides allowed_local_3pids below.
+#
+#check_is_for_allowed_local_3pids: matrix.org
+#
+# If you are using an IS you can also check whether that IS registers
+# pending invites for the given 3PID (and then allow it to sign up on
+# the platform):
+#
+#allow_invited_3pids: false
+#
 #allowed_local_3pids:
 #  - medium: email
 #    pattern: '.*@matrix\.org'
@@ -947,6 +1075,11 @@ uploads_path: "DATADIR/uploads"
 #  - medium: msisdn
 #    pattern: '\+44'
 
+# If true, stop users from trying to change the 3PIDs associated with
+# their accounts.
+#
+#disable_3pid_changes: false
+
 # Enable 3PIDs lookup requests to identity servers from this server.
 #
 #enable_3pid_lookup: true
@@ -996,6 +1129,30 @@ uploads_path: "DATADIR/uploads"
 #  - matrix.org
 #  - vector.im
 
+# If enabled, user IDs, display names and avatar URLs will be replicated
+# to this server whenever they change.
+# This is an experimental API currently implemented by sydent to support
+# cross-homeserver user directories.
+#
+#replicate_user_profiles_to: example.com
+
+# If specified, attempt to replay registrations, profile changes & 3pid
+# bindings on the given target homeserver via the AS API. The HS is authed
+# via a given AS token.
+#
+#shadow_server:
+#  hs_url: https://shadow.example.com
+#  hs: shadow.example.com
+#  as_token: 12u394refgbdhivsia
+
+# If enabled, don't let users set their own display names/avatars
+# other than for the very first time (unless they are a server admin).
+# Useful when provisioning users based on the contents of a 3rd party
+# directory and to avoid ambiguities.
+#
+#disable_set_displayname: false
+#disable_set_avatar_url: false
+
 # Handle threepid (email/phone etc) registration and password resets through a set of
 # *trusted* identity servers. Note that this allows the configured identity server to
 # reset passwords for accounts!
@@ -1250,33 +1407,58 @@ saml2_config:
   #
   #config_path: "CONFDIR/sp_conf.py"
 
-  # the lifetime of a SAML session. This defines how long a user has to
+  # The lifetime of a SAML session. This defines how long a user has to
   # complete the authentication process, if allow_unsolicited is unset.
   # The default is 5 minutes.
   #
   #saml_session_lifetime: 5m
 
-  # The SAML attribute (after mapping via the attribute maps) to use to derive
-  # the Matrix ID from. 'uid' by default.
-  #
-  #mxid_source_attribute: displayName
-
-  # The mapping system to use for mapping the saml attribute onto a matrix ID.
-  # Options include:
-  #  * 'hexencode' (which maps unpermitted characters to '=xx')
-  #  * 'dotreplace' (which replaces unpermitted characters with '.').
-  # The default is 'hexencode'.
+  # An external module can be provided here as a custom solution to
+  # mapping attributes returned from a saml provider onto a matrix user.
   #
-  #mxid_mapping: dotreplace
+  user_mapping_provider:
+    # The custom module's class. Uncomment to use a custom module.
+    #
+    #module: mapping_provider.SamlMappingProvider
 
-  # In previous versions of synapse, the mapping from SAML attribute to MXID was
-  # always calculated dynamically rather than stored in a table. For backwards-
-  # compatibility, we will look for user_ids matching such a pattern before
-  # creating a new account.
+    # Custom configuration values for the module. Below options are
+    # intended for the built-in provider, they should be changed if
+    # using a custom module. This section will be passed as a Python
+    # dictionary to the module's `parse_config` method.
+    #
+    config:
+      # The SAML attribute (after mapping via the attribute maps) to use
+      # to derive the Matrix ID from. 'uid' by default.
+      #
+      # Note: This used to be configured by the
+      # saml2_config.mxid_source_attribute option. If that is still
+      # defined, its value will be used instead.
+      #
+      #mxid_source_attribute: displayName
+
+      # The mapping system to use for mapping the saml attribute onto a
+      # matrix ID.
+      #
+      # Options include:
+      #  * 'hexencode' (which maps unpermitted characters to '=xx')
+      #  * 'dotreplace' (which replaces unpermitted characters with
+      #     '.').
+      # The default is 'hexencode'.
+      #
+      # Note: This used to be configured by the
+      # saml2_config.mxid_mapping option. If that is still defined, its
+      # value will be used instead.
+      #
+      #mxid_mapping: dotreplace
+
+      # In previous versions of synapse, the mapping from SAML attribute to
+      # MXID was always calculated dynamically rather than stored in a
+      # table. For backwards-compatibility, we will look for user_ids
+      # matching such a pattern before creating a new account.
   #
   # This setting controls the SAML attribute which will be used for this
-  # backwards-compatibility lookup. Typically it should be 'uid', but if the
-  # attribute maps are changed, it may be necessary to change it.
+  # backwards-compatibility lookup. Typically it should be 'uid', but if
+  # the attribute maps are changed, it may be necessary to change it.
   #
   # The default is 'uid'.
   #
@@ -1319,6 +1501,36 @@ password_config:
    #
    #pepper: "EVEN_MORE_SECRET"
 
+   # Define and enforce a password policy. Each parameter is optional, boolean
+   # parameters default to 'false' and integer parameters default to 0.
+   # This is an early implementation of MSC2000.
+   #
+   #policy:
+      # Whether to enforce the password policy.
+      #
+      #enabled: true
+
+      # Minimum accepted length for a password.
+      #
+      #minimum_length: 15
+
+      # Whether a password must contain at least one digit.
+      #
+      #require_digit: true
+
+      # Whether a password must contain at least one symbol.
+      # A symbol is any character that's not a number or a letter.
+      #
+      #require_symbol: true
+
+      # Whether a password must contain at least one lowercase letter.
+      #
+      #require_lowercase: true
+
+      # Whether a password must contain at least one uppercase letter.
+      #
+      #require_uppercase: true
 
 # Enable sending emails for password resets, notification events or
@@ -1488,6 +1700,11 @@ password_config:
 #user_directory:
 #  enabled: true
 #  search_all_users: false
+#
+#  # If this is set, user search will be delegated to this ID server instead
+#  # of synapse performing the search itself.
+#  # This is an experimental API.
+#  defer_to_id_server: https://id.example.com
 
 # User Consent configuration
diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py
index ca4b879526..5c5a115ca9 100644
--- a/docs/sphinx/conf.py
+++ b/docs/sphinx/conf.py
@@ -12,8 +12,8 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
 import os
+import sys
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -191,11 +191,11 @@ htmlhelp_basename = "Synapsedoc"
 
 latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
-    #'papersize': 'letterpaper',
+    # 'papersize': 'letterpaper',
     # The font size ('10pt', '11pt' or '12pt').
-    #'pointsize': '10pt',
+    # 'pointsize': '10pt',
     # Additional stuff for the LaTeX preamble.
-    #'preamble': '',
+    # 'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
diff --git a/docs/workers.md b/docs/workers.md
index 4bd60ba0a0..1b5d94f5eb 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -196,7 +196,7 @@ Handles the media repository. It can handle all endpoints starting with:
 
     /_matrix/media/
 
-And the following regular expressions matching media-specific administration APIs:
+... and the following regular expressions matching media-specific administration APIs:
 
     ^/_synapse/admin/v1/purge_media_cache$
     ^/_synapse/admin/v1/room/.*/media$
@@ -206,6 +206,18 @@ You should also set `enable_media_repo: False` in the shared configuration
 file to stop the main synapse running background jobs related to managing the
 media repository.
 
+In the `media_repository` worker configuration file, configure the http listener to
+expose the `media` resource. For example:
+
+```yaml
+    worker_listeners:
+     - type: http
+       port: 8085
+       resources:
+         - names:
+           - media
+```
+
 Note this worker cannot be load-balanced: only one instance should be active.
 
 ### `synapse.app.client_reader`
diff --git a/mypy.ini b/mypy.ini
index 1d77c0ecc8..a66434b76b 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -1,7 +1,7 @@
 [mypy]
 namespace_packages = True
 plugins = mypy_zope:plugin
-follow_imports = normal
+follow_imports = silent
 check_untyped_defs = True
 show_error_codes = True
 show_traceback = True
diff --git a/res/templates-dinsic/mail-Vector.css b/res/templates-dinsic/mail-Vector.css
new file mode 100644
index 0000000000..6a3e36eda1
--- /dev/null
+++ b/res/templates-dinsic/mail-Vector.css
@@ -0,0 +1,7 @@
+.header {
+    border-bottom: 4px solid #e4f7ed ! important;
+}
+
+.notif_link a, .footer a {
+    color: #76CFA6 ! important;
+}
diff --git a/res/templates-dinsic/mail.css b/res/templates-dinsic/mail.css
new file mode 100644
index 0000000000..5ab3e1b06d
--- /dev/null
+++ b/res/templates-dinsic/mail.css
@@ -0,0 +1,156 @@
+body {
+    margin: 0px;
+}
+
+pre, code {
+    word-break: break-word;
+    white-space: pre-wrap;
+}
+
+#page {
+    font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
+    font-color: #454545;
+    font-size: 12pt;
+    width: 100%;
+    padding: 20px;
+}
+
+#inner {
+    width: 640px;
+}
+
+.header {
+    width: 100%;
+    height: 87px;
+    color: #454545;
+    border-bottom: 4px solid #e5e5e5;
+}
+
+.logo {
+    text-align: right;
+    margin-left: 20px;
+}
+
+.salutation {
+    padding-top: 10px;
+    font-weight: bold;
+}
+
+.summarytext {
+}
+
+.room {
+    width: 100%;
+    color: #454545;
+    border-bottom: 1px solid #e5e5e5;
+}
+
+.room_header td {
+    padding-top: 38px;
+    padding-bottom: 10px;
+    border-bottom: 1px solid #e5e5e5;
+}
+
+.room_name {
+    vertical-align: middle;
+    font-size: 18px;
+    font-weight: bold;
+}
+
+.room_header h2 {
+    margin-top: 0px;
+    margin-left: 75px;
+    font-size: 20px;
+}
+
+.room_avatar {
+    width: 56px;
+    line-height: 0px;
+    text-align: center;
+    vertical-align: middle;
+}
+
+.room_avatar img {
+    width: 48px;
+    height: 48px;
+    object-fit: cover;
+    border-radius: 24px;
+}
+
+.notif {
+    border-bottom: 1px solid #e5e5e5;
+    margin-top: 16px;
+    padding-bottom: 16px;
+}
+
+.historical_message .sender_avatar {
+    opacity: 0.3;
+}
+
+/* spell out opacity and historical_message class names for Outlook aka Word */
+.historical_message .sender_name {
+    color: #e3e3e3;
+}
+
+.historical_message .message_time {
+    color: #e3e3e3;
+}
+
+.historical_message .message_body {
+    color: #c7c7c7;
+}
+
+.historical_message td,
+.message td {
+    padding-top: 10px;
+}
+
+.sender_avatar {
+    width: 56px;
+    text-align: center;
+    vertical-align: top;
+}
+
+.sender_avatar img {
+    margin-top: -2px;
+    width: 32px;
+    height: 32px;
+    border-radius: 16px;
+}
+
+.sender_name {
+    display: inline;
+    font-size: 13px;
+    color: #a2a2a2;
+}
+
+.message_time {
+    text-align: right;
+    width: 100px;
+    font-size: 11px;
+    color: #a2a2a2;
+}
+
+.message_body {
+}
+
+.notif_link td {
+    padding-top: 10px;
+    padding-bottom: 10px;
+    font-weight: bold;
+}
+
+.notif_link a, .footer a {
+    color: #454545;
+    text-decoration: none;
+}
+
+.debug {
+    font-size: 10px;
+    color: #888;
+}
+
+.footer {
+    margin-top: 20px;
+    text-align: center;
+}
\ No newline at end of file
diff --git a/res/templates-dinsic/notif.html b/res/templates-dinsic/notif.html
new file mode 100644
index 0000000000..bcdfeea9da
--- /dev/null
+++ b/res/templates-dinsic/notif.html
@@ -0,0 +1,45 @@
+{% for message in notif.messages %}
+    <tr class="{{ "historical_message" if message.is_historical else "message" }}">
+        <td class="sender_avatar">
+            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
+                {% if message.sender_avatar_url %}
+                    <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
+                {% else %}
+                    {% if message.sender_hash % 3 == 0 %}
+                        <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
+                    {% elif message.sender_hash % 3 == 1 %}
+                        <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
+                    {% else %}
+                        <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
+                    {% endif %}
+                {% endif %}
+            {% endif %}
+        </td>
+        <td class="message_contents">
+            {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
+                <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
+            {% endif %}
+            <div class="message_body">
+                {% if message.msgtype == "m.text" %}
+                    {{ message.body_text_html }}
+                {% elif message.msgtype == "m.emote" %}
+                    {{ message.body_text_html }}
+                {% elif message.msgtype == "m.notice" %}
+                    {{ message.body_text_html }}
+                {% elif message.msgtype == "m.image" %}
+                    <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
+                {% elif message.msgtype == "m.file" %}
+                    <span class="filename">{{ message.body_text_plain }}</span>
+                {% endif %}
+            </div>
+        </td>
+        <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
+    </tr>
+{% endfor %}
+<tr class="notif_link">
+    <td></td>
+    <td>
+        <a href="{{ notif.link }}">Voir {{ room.title }}</a>
+    </td>
+    <td></td>
+</tr>
diff --git a/res/templates-dinsic/notif.txt b/res/templates-dinsic/notif.txt
new file mode 100644
index 0000000000..3dff1bb570 --- /dev/null +++ b/res/templates-dinsic/notif.txt
@@ -0,0 +1,16 @@ +{% for message in notif.messages %} +{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }}) +{% if message.msgtype == "m.text" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.emote" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.notice" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.image" %} +{{ message.body_text_plain }} +{% elif message.msgtype == "m.file" %} +{{ message.body_text_plain }} +{% endif %} +{% endfor %} + +Voir {{ room.title }} à {{ notif.link }} diff --git a/res/templates-dinsic/notif_mail.html b/res/templates-dinsic/notif_mail.html new file mode 100644
index 0000000000..1e1efa74b2 --- /dev/null +++ b/res/templates-dinsic/notif_mail.html
@@ -0,0 +1,55 @@ +<!doctype html> +<html lang="en"> + <head> + <style type="text/css"> + {% include 'mail.css' without context %} + {% include "mail-%s.css" % app_name ignore missing without context %} + </style> + </head> + <body> + <table id="page"> + <tr> + <td> </td> + <td id="inner"> + <table class="header"> + <tr> + <td> + <div class="salutation">Bonjour {{ user_display_name }},</div> + <div class="summarytext">{{ summary_text }}</div> + </td> + <td class="logo"> + {% if app_name == "Riot" %} + <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/> + {% elif app_name == "Vector" %} + <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/> + {% else %} + <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/> + {% endif %} + </td> + </tr> + </table> + {% for room in rooms %} + {% include 'room.html' with context %} + {% endfor %} + <div class="footer"> + <a href="{{ unsubscribe_link }}">Se désinscrire</a> + <br/> + <br/> + <div class="debug"> + Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because + an event was received at {{ reason.received_at|format_ts("%c") }} + which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago, + {% if reason.last_sent_ts %} + and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }}, + which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago. + {% else %} + and we don't have a last time we sent a mail for this room. + {% endif %} + </div> + </div> + </td> + <td> </td> + </tr> + </table> + </body> +</html> diff --git a/res/templates-dinsic/notif_mail.txt b/res/templates-dinsic/notif_mail.txt new file mode 100644
index 0000000000..fae877426f --- /dev/null +++ b/res/templates-dinsic/notif_mail.txt
@@ -0,0 +1,10 @@ +Bonjour {{ user_display_name }}, + +{{ summary_text }} + +{% for room in rooms %} +{% include 'room.txt' with context %} +{% endfor %} + +Vous pouvez désactiver ces notifications en cliquant ici {{ unsubscribe_link }} + diff --git a/res/templates-dinsic/room.html b/res/templates-dinsic/room.html new file mode 100644
index 0000000000..0487b1b11c --- /dev/null +++ b/res/templates-dinsic/room.html
@@ -0,0 +1,33 @@ +<table class="room"> + <tr class="room_header"> + <td class="room_avatar"> + {% if room.avatar_url %} + <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" /> + {% else %} + {% if room.hash % 3 == 0 %} + <img alt="" src="https://vector.im/beta/img/76cfa6.png" /> + {% elif room.hash % 3 == 1 %} + <img alt="" src="https://vector.im/beta/img/50e2c2.png" /> + {% else %} + <img alt="" src="https://vector.im/beta/img/f4c371.png" /> + {% endif %} + {% endif %} + </td> + <td class="room_name" colspan="2"> + {{ room.title }} + </td> + </tr> + {% if room.invite %} + <tr> + <td></td> + <td> + <a href="{{ room.link }}">Rejoindre la conversation.</a> + </td> + <td></td> + </tr> + {% else %} + {% for notif in room.notifs %} + {% include 'notif.html' with context %} + {% endfor %} + {% endif %} +</table> diff --git a/res/templates-dinsic/room.txt b/res/templates-dinsic/room.txt new file mode 100644
index 0000000000..dd36d01d21 --- /dev/null +++ b/res/templates-dinsic/room.txt
@@ -0,0 +1,9 @@ +{{ room.title }} + +{% if room.invite %} +   Vous avez été invité, rejoignez la conversation en cliquant sur le lien suivant {{ room.link }} +{% else %} + {% for notif in room.notifs %} + {% include 'notif.txt' with context %} + {% endfor %} +{% endif %} diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
index 0ec5075e79..b8a85abe18 100755 --- a/scripts-dev/check-newsfragment +++ b/scripts-dev/check-newsfragment
@@ -5,9 +5,9 @@ set -e -# make sure that origin/develop is up to date -git remote set-branches --add origin develop -git fetch origin develop +# make sure that origin/dinsic is up to date +git remote set-branches --add origin dinsic +git fetch origin dinsic # if there are changes in the debian directory, check that the debian changelog # has been updated diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh new file mode 100755
index 0000000000..60e8970a35 --- /dev/null +++ b/scripts-dev/make_full_schema.sh
@@ -0,0 +1,184 @@ +#!/bin/bash +# +# This script generates SQL files for creating a brand new Synapse DB with the latest +# schema, on both SQLite3 and Postgres. +# +# It does so by having Synapse generate an up-to-date SQLite DB, then running +# synapse_port_db to convert it to Postgres. It then dumps the contents of both. + +POSTGRES_HOST="localhost" +POSTGRES_DB_NAME="synapse_full_schema.$$" + +SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite" +POSTGRES_FULL_SCHEMA_OUTPUT_FILE="full.sql.postgres" + +REQUIRED_DEPS=("matrix-synapse" "psycopg2") + +usage() { + echo + echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]" + echo + echo "-p <postgres_username>" + echo " Username to connect to local postgres instance. The password will be requested" + echo " during script execution." + echo "-c" + echo " CI mode. Enables coverage tracking and prints every command that the script runs." + echo "-o <path>" + echo " Directory to output full schema files to." + echo "-h" + echo " Display this help text." +} + +while getopts "p:co:h" opt; do + case $opt in + p) + POSTGRES_USERNAME=$OPTARG + ;; + c) + # Print all commands that are being executed + set -x + + # Modify required dependencies for coverage + REQUIRED_DEPS+=("coverage" "coverage-enable-subprocess") + + COVERAGE=1 + ;; + o) + command -v realpath > /dev/null || (echo "The -o flag requires the 'realpath' binary to be installed" && exit 1) + OUTPUT_DIR="$(realpath "$OPTARG")" + ;; + h) + usage + exit + ;; + \?) + echo "ERROR: Invalid option: -$OPTARG" >&2 + usage + exit + ;; + esac +done + +# Check that required dependencies are installed +unsatisfied_requirements=() +for dep in "${REQUIRED_DEPS[@]}"; do + pip show "$dep" --quiet || unsatisfied_requirements+=("$dep") +done +if [ ${#unsatisfied_requirements[@]} -ne 0 ]; then + echo "Please install the following python packages: ${unsatisfied_requirements[*]}" + exit 1 +fi + +if [ -z "$POSTGRES_USERNAME" ]; then + echo "No postgres username supplied" + usage + exit 1 +fi + +if [ -z "$OUTPUT_DIR" ]; then + echo "No output directory supplied" + usage + exit 1 +fi + +# Create the output directory if it doesn't exist +mkdir -p "$OUTPUT_DIR" + +read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD +echo "" + +# Exit immediately if a command fails +set -e + +# cd to root of the synapse directory +cd "$(dirname "$0")/.." + +# Create temporary SQLite and Postgres homeserver db configs and key file +TMPDIR=$(mktemp -d) +KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path +SQLITE_CONFIG=$TMPDIR/sqlite.conf +SQLITE_DB=$TMPDIR/homeserver.db +POSTGRES_CONFIG=$TMPDIR/postgres.conf + +# Ensure these files are deleted on script exit +trap 'rm -rf $TMPDIR' EXIT + +cat > "$SQLITE_CONFIG" <<EOF +server_name: "test" + +signing_key_path: "$KEY_FILE" +macaroon_secret_key: "abcde" + +report_stats: false + +database: + name: "sqlite3" + args: + database: "$SQLITE_DB" + +# Suppress the key server warning. +trusted_key_servers: [] +EOF + +cat > "$POSTGRES_CONFIG" <<EOF +server_name: "test" + +signing_key_path: "$KEY_FILE" +macaroon_secret_key: "abcde" + +report_stats: false + +database: + name: "psycopg2" + args: + user: "$POSTGRES_USERNAME" + host: "$POSTGRES_HOST" + password: "$POSTGRES_PASSWORD" + database: "$POSTGRES_DB_NAME" + +# Suppress the key server warning. +trusted_key_servers: [] +EOF + +# Generate the server's signing key. +echo "Generating SQLite3 db schema..."
+python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" + +# Make sure the SQLite3 database is using the latest schema and has no pending background update. +echo "Running db background jobs..." +scripts-dev/update_database --database-config "$SQLITE_CONFIG" + +# Create the PostgreSQL database. +echo "Creating postgres database..." +createdb $POSTGRES_DB_NAME + +echo "Copying data from SQLite3 to Postgres with synapse_port_db..." +if [ -z "$COVERAGE" ]; then + # No coverage needed + scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" +else + # Coverage desired + coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" +fi + +# Delete schema_version, applied_schema_deltas and applied_module_schemas tables +# This needs to be done after synapse_port_db is run +echo "Dropping unwanted db tables..." +SQL=" +DROP TABLE schema_version; +DROP TABLE applied_schema_deltas; +DROP TABLE applied_module_schemas; +" +sqlite3 "$SQLITE_DB" <<< "$SQL" +psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL" + +echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..." +sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE" + +echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..." +pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE" + +echo "Cleaning up temporary Postgres database..." +dropdb $POSTGRES_DB_NAME + +echo "Done! Files dumped to: $OUTPUT_DIR" diff --git a/scripts-dev/update_database b/scripts-dev/update_database
index 1776d202c5..23017c21f8 100755 --- a/scripts-dev/update_database +++ b/scripts-dev/update_database
@@ -26,7 +26,6 @@ from synapse.config.homeserver import HomeServerConfig from synapse.metrics.background_process_metrics import run_as_background_process from synapse.server import HomeServer from synapse.storage import DataStore -from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database logger = logging.getLogger("update_database") @@ -35,21 +34,11 @@ logger = logging.getLogger("update_database") class MockHomeserver(HomeServer): DATASTORE_CLASS = DataStore - def __init__(self, config, database_engine, db_conn, **kwargs): + def __init__(self, config, **kwargs): super(MockHomeserver, self).__init__( - config.server_name, - reactor=reactor, - config=config, - database_engine=database_engine, - **kwargs + config.server_name, reactor=reactor, config=config, **kwargs ) - self.database_engine = database_engine - self.db_conn = db_conn - - def get_db_conn(self): - return self.db_conn - if __name__ == "__main__": parser = argparse.ArgumentParser( @@ -85,24 +74,14 @@ if __name__ == "__main__": config = HomeServerConfig() config.parse_config_dict(hs_config, "", "") - # Create the database engine and a connection to it. - database_engine = create_engine(config.database_config) - db_conn = database_engine.module.connect( - **{ - k: v - for k, v in config.database_config.get("args", {}).items() - if not k.startswith("cp_") - } - ) + # Instantiate and initialise the homeserver object. + hs = MockHomeserver(config) + db_conn = hs.get_db_conn() # Update the database to the latest schema. - prepare_database(db_conn, database_engine, config=config) + prepare_database(db_conn, hs.database_engine, config=config) db_conn.commit() - # Instantiate and initialise the homeserver object. - hs = MockHomeserver( - config, database_engine, db_conn, db_config=config.database_config, - ) # setup instantiates the store within the homeserver object. hs.setup() store = hs.get_datastore() diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 5d0b7d2801..c12c7072a1 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py
@@ -197,6 +197,7 @@ class Auth(object): access_token = self.get_access_token_from_request(request) user_id, app_service = yield self._get_appservice_user_id(request) + if user_id: request.authenticated_entity = user_id opentracing.set_tag("authenticated_entity", user_id) @@ -261,11 +262,11 @@ class Auth(object): except KeyError: raise MissingClientTokenError() - @defer.inlineCallbacks def _get_appservice_user_id(self, request): app_service = self.store.get_app_service_by_token( self.get_access_token_from_request(request) ) + if app_service is None: return None, None @@ -283,8 +284,12 @@ class Auth(object): if not app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") - if not (yield self.store.get_user_by_id(user_id)): - raise AuthError(403, "Application service has not registered this user") + # Let ASes manipulate nonexistent users (e.g. to shadow-register them) + # if not (yield self.store.get_user_by_id(user_id)): + # raise AuthError( + # 403, + # "Application service has not registered this user" + # ) return user_id, app_service @defer.inlineCallbacks diff --git a/synapse/api/constants.py b/synapse/api/constants.py
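With the registration check above commented out, an application service can masquerade as user IDs it controls even before they exist on the server, which is what enables shadow-registration. For illustration, a masqueraded request might look like the following; the homeserver URL, user ID and token are all hypothetical:

    import requests  # any HTTP client works; requests is used for brevity

    # The AS asserts the user it is acting as via the user_id query
    # parameter, authenticating with its own as_token.
    resp = requests.put(
        "https://synapse.example.com/_matrix/client/r0/profile"
        "/%40alice%3Aexample.com/displayname",
        params={"user_id": "@alice:example.com", "access_token": "AS_TOKEN"},
        json={"displayname": "Alice"},
    )
    print(resp.status_code)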
index 0ade47e624..405d08b2cc 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py
@@ -86,6 +86,7 @@ class EventTypes(object): RoomAvatar = "m.room.avatar" RoomEncryption = "m.room.encryption" GuestAccess = "m.room.guest_access" + Encryption = "m.room.encryption" # These are used for validation Message = "m.room.message" diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 5853a54c95..ee2a295300 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py
@@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,6 +65,13 @@ class Codes(object): EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT" INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" + PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT" + PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT" + PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE" + PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE" + PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL" + PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY" + WEAK_PASSWORD = "M_WEAK_PASSWORD" class CodeMessageException(RuntimeError): @@ -421,6 +429,18 @@ class IncompatibleRoomVersionError(SynapseError): return cs_error(self.msg, self.errcode, room_version=self._room_version) +class PasswordRefusedError(SynapseError): + """A password has been refused, either during password reset/change or registration. + """ + + def __init__( + self, + msg="This password doesn't comply with the server's policy", + errcode=Codes.WEAK_PASSWORD, + ): + super(PasswordRefusedError, self).__init__(code=400, msg=msg, errcode=errcode) + + class RequestSendFailed(RuntimeError): """Sending a HTTP request over federation failed due to not being able to talk to the remote server for some reason. diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
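The new error class pairs with the M_PASSWORD_* codes added above, so callers can reject a password with the most specific code available. A minimal sketch of intended usage (the calling function is invented for illustration):

    from synapse.api.errors import Codes, PasswordRefusedError

    def check_minimum_length(password, minimum_length):
        # Reject passwords shorter than the configured minimum.
        if len(password) < minimum_length:
            raise PasswordRefusedError(
                msg="The password must be at least %d characters long"
                % minimum_length,
                errcode=Codes.PASSWORD_TOO_SHORT,
            )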
index 04751a6a5e..51a909419f 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py
@@ -45,7 +45,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.replication.slave.storage.room import RoomStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer -from synapse.storage.engines import create_engine from synapse.util.logcontext import LoggingContext from synapse.util.versionstring import get_version_string @@ -229,14 +228,10 @@ def start(config_options): synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = AdminCmdServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index 02b900f382..e82e0f11e3 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py
@@ -34,7 +34,6 @@ from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -143,8 +142,6 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - if config.notify_appservices: sys.stderr.write( "\nThe appservices must be disabled in the main synapse process" @@ -159,10 +156,8 @@ def start(config_options): ps = AppserviceServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ps, config, use_worker_options=True) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index dadb487d5f..3edfe19567 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py
@@ -62,7 +62,6 @@ from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet from synapse.rest.client.versions import VersionsRestServlet from synapse.server import HomeServer -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -181,14 +180,10 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = ClientReaderServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index d110599a35..d0ddbe38fc 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py
@@ -57,7 +57,6 @@ from synapse.rest.client.v1.room import ( ) from synapse.server import HomeServer from synapse.storage.data_stores.main.user_directory import UserDirectoryStore -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -180,14 +179,10 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = EventCreatorServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 418c086254..311523e0ed 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py
@@ -46,7 +46,6 @@ from synapse.replication.slave.storage.transactions import SlavedTransactionStor from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -162,14 +161,10 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = FederationReaderServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index f24920a7d6..83c436229c 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py
@@ -41,7 +41,6 @@ from synapse.replication.tcp.client import ReplicationClientHandler from synapse.replication.tcp.streams._base import ReceiptsStream from synapse.server import HomeServer from synapse.storage.database import Database -from synapse.storage.engines import create_engine from synapse.types import ReadReceipt from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree @@ -174,8 +173,6 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - if config.send_federation: sys.stderr.write( "\nThe send_federation must be disabled in the main synapse process" @@ -190,10 +187,8 @@ def start(config_options): ss = FederationSenderServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index e647459d0e..30e435eead 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py
@@ -39,7 +39,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v2_alpha._base import client_patterns from synapse.server import HomeServer -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -234,14 +233,10 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = FrontendProxyServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index df65d0a989..b8661457e2 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py
@@ -69,7 +69,7 @@ from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.rest.well_known import WellKnownResource from synapse.server import HomeServer from synapse.storage import DataStore -from synapse.storage.engines import IncorrectDatabaseSetup, create_engine +from synapse.storage.engines import IncorrectDatabaseSetup from synapse.storage.prepare_database import UpgradeDatabaseException from synapse.util.caches import CACHE_SIZE_FACTOR from synapse.util.httpresourcetree import create_resource_tree @@ -328,15 +328,10 @@ def setup(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection - hs = SynapseHomeServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) synapse.config.logger.setup_logging(hs, config, use_worker_options=False) @@ -519,8 +514,10 @@ def phone_stats_home(hs, stats, stats_process=_stats_process): # Database version # - stats["database_engine"] = hs.database_engine.module.__name__ - stats["database_server_version"] = hs.database_engine.server_version + # This only reports info about the *main* database. + stats["database_engine"] = hs.get_datastore().db.engine.module.__name__ + stats["database_server_version"] = hs.get_datastore().db.engine.server_version + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) try: yield hs.get_proxied_http_client().put_json( diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index 2c6dd3ef02..4c80f257e2 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py
@@ -40,7 +40,6 @@ from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer from synapse.storage.data_stores.main.media_repository import MediaRepositoryStore -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -157,14 +156,10 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = MediaRepositoryServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index dd52a9fc2d..09e639040a 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py
@@ -37,7 +37,6 @@ from synapse.replication.slave.storage.room import RoomStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.server import HomeServer from synapse.storage import DataStore -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.versionstring import get_version_string @@ -203,14 +202,10 @@ def start(config_options): # Force the pushers to start since they will be disabled in the main config config.start_pushers = True - database_engine = create_engine(config.database_config) - ps = PusherServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ps, config, use_worker_options=True) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 288ee64b42..dd2132e608 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py
@@ -55,7 +55,6 @@ from synapse.rest.client.v1.room import RoomInitialSyncRestServlet from synapse.rest.client.v2_alpha import sync from synapse.server import HomeServer from synapse.storage.data_stores.main.presence import UserPresenceState -from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole from synapse.util.stringutils import random_string @@ -437,14 +436,10 @@ def start(config_options): synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - ss = SynchrotronServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, application_service_handler=SynchrotronApplicationService(), ) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index c01fb34a9b..1257098f92 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py
@@ -44,7 +44,6 @@ from synapse.rest.client.v2_alpha import user_directory from synapse.server import HomeServer from synapse.storage.data_stores.main.user_directory import UserDirectoryStore from synapse.storage.database import Database -from synapse.storage.engines import create_engine from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.httpresourcetree import create_resource_tree from synapse.util.manhole import manhole @@ -200,8 +199,6 @@ def start(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts - database_engine = create_engine(config.database_config) - if config.update_user_directory: sys.stderr.write( "\nThe update_user_directory must be disabled in the main synapse process" @@ -216,10 +213,8 @@ def start(config_options): ss = UserDirectoryServer( config.server_name, - db_config=config.database_config, config=config, version_string="Synapse/" + get_version_string(synapse), - database_engine=database_engine, ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index aea3985a5f..1b13e84425 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py
@@ -270,7 +270,7 @@ class ApplicationService(object): def is_exclusive_room(self, room_id): return self._is_exclusive(ApplicationService.NS_ROOMS, room_id) - def get_exlusive_user_regexes(self): + def get_exclusive_user_regexes(self): """Get the list of regexes used to determine if a user is exclusively registered by the AS """ diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 08619404bb..e865fd8f41 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py
@@ -18,6 +18,7 @@ import argparse import errno import os +from io import open as io_open from collections import OrderedDict from textwrap import dedent from typing import Any, MutableMapping, Optional @@ -169,7 +170,7 @@ class Config(object): @classmethod def read_file(cls, file_path, config_name): cls.check_file(file_path, config_name) - with open(file_path) as file_stream: + with io_open(file_path, encoding="utf-8") as file_stream: return file_stream.read() diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 18f42a87f9..35756bed87 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py
@@ -21,6 +21,7 @@ from __future__ import print_function import email.utils import os from enum import Enum +from typing import Optional import pkg_resources @@ -101,7 +102,7 @@ class EmailConfig(Config): # both in RegistrationConfig and here. We should factor this bit out self.account_threepid_delegate_email = self.trusted_third_party_id_servers[ 0 - ] + ] # type: Optional[str] self.using_identity_server_from_trusted_list = True else: raise ConfigError( diff --git a/synapse/config/password.py b/synapse/config/password.py
index 2a634ac751..2c13810ab8 100644 --- a/synapse/config/password.py +++ b/synapse/config/password.py
@@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -31,6 +33,10 @@ class PasswordConfig(Config): self.password_localdb_enabled = password_config.get("localdb_enabled", True) self.password_pepper = password_config.get("pepper", "") + # Password policy + self.password_policy = password_config.get("policy", {}) + self.password_policy_enabled = self.password_policy.pop("enabled", False) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ password_config: @@ -48,4 +54,34 @@ class PasswordConfig(Config): # DO NOT CHANGE THIS AFTER INITIAL SETUP! # #pepper: "EVEN_MORE_SECRET" + + # Define and enforce a password policy. Each parameter is optional, boolean + # parameters default to 'false' and integer parameters default to 0. + # This is an early implementation of MSC2000. + # + #policy: + # Whether to enforce the password policy. + # + #enabled: true + + # Minimum accepted length for a password. + # + #minimum_length: 15 + + # Whether a password must contain at least one digit. + # + #require_digit: true + + # Whether a password must contain at least one symbol. + # A symbol is any character that's not a number or a letter. + # + #require_symbol: true + + # Whether a password must contain at least one lowercase letter. + # + #require_lowercase: true + + # Whether a password must contain at least one uppercase letter. + # + #require_uppercase: true """
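A sketch of how these policy options could be enforced, mapping each option onto the corresponding errcode from synapse/api/errors.py above. This illustrates the intended semantics; it is not the handler this branch ships:

    import re

    from synapse.api.errors import Codes, PasswordRefusedError

    def assert_valid_password(policy, password):
        # 'policy' is the dict read into self.password_policy above.
        if len(password) < policy.get("minimum_length", 0):
            raise PasswordRefusedError(errcode=Codes.PASSWORD_TOO_SHORT)
        if policy.get("require_digit") and not re.search(r"[0-9]", password):
            raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_DIGIT)
        if policy.get("require_symbol") and not re.search(r"[^a-zA-Z0-9]", password):
            raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_SYMBOL)
        if policy.get("require_lowercase") and not re.search(r"[a-z]", password):
            raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_LOWERCASE)
        if policy.get("require_uppercase") and not re.search(r"[A-Z]", password):
            raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_UPPERCASE)

diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py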
index 947f653e03..dbc3dd7a2c 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py
@@ -70,6 +70,9 @@ class RatelimitConfig(Config): ) self.rc_registration = RateLimitConfig(config.get("rc_registration", {})) + self.rc_third_party_invite = RateLimitConfig( + config.get("rc_third_party_invite", {}) + ) rc_login_config = config.get("rc_login", {}) self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {})) @@ -83,10 +86,9 @@ class RatelimitConfig(Config): ) rc_admin_redaction = config.get("rc_admin_redaction") + self.rc_admin_redaction = None if rc_admin_redaction: self.rc_admin_redaction = RateLimitConfig(rc_admin_redaction) - else: - self.rc_admin_redaction = None def generate_config_section(self, **kwargs): return """\ @@ -110,6 +112,8 @@ class RatelimitConfig(Config): # - one for login that ratelimits login requests based on the account the # client is attempting to log into, based on the amount of failed login # attempts for this account. + # - one that ratelimits third-party invites requests based on the account + # that's making the requests. # - one for ratelimiting redactions by room admins. If this is not explicitly # set then it uses the same ratelimiting as per rc_message. This is useful # to allow room admins to deal with abuse quickly. @@ -135,6 +139,10 @@ class RatelimitConfig(Config): # per_second: 0.17 # burst_count: 3 # + #rc_third_party_invite: + # per_second: 0.2 + # burst_count: 10 + # #rc_admin_redaction: # per_second: 1 # burst_count: 50 diff --git a/synapse/config/registration.py b/synapse/config/registration.py
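A sketch of how a handler might consult the new rc_third_party_invite setting, following the existing Ratelimiter convention of raising once the burst allowance is exhausted; 'hs', 'requester' and 'clock' are assumed to exist in the calling handler:

    from synapse.api.ratelimiting import Ratelimiter

    limiter = Ratelimiter()

    def ratelimit_third_party_invite(hs, requester, clock):
        config = hs.config.rc_third_party_invite
        # Raises LimitExceededError when the account sends invites
        # faster than per_second allows, beyond burst_count.
        limiter.ratelimit(
            requester.user.to_string(),
            time_now_s=clock.time(),
            rate_hz=config.per_second,
            burst_count=config.burst_count,
        )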
index ee9614c5f7..44dc4ff550 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py
@@ -96,8 +96,19 @@ class RegistrationConfig(Config): self.registrations_require_3pid = config.get("registrations_require_3pid", []) self.allowed_local_3pids = config.get("allowed_local_3pids", []) + self.check_is_for_allowed_local_3pids = config.get( + "check_is_for_allowed_local_3pids", None + ) + self.allow_invited_3pids = config.get("allow_invited_3pids", False) + + self.disable_3pid_changes = config.get("disable_3pid_changes", False) + self.enable_3pid_lookup = config.get("enable_3pid_lookup", True) self.registration_shared_secret = config.get("registration_shared_secret") + self.register_mxid_from_3pid = config.get("register_mxid_from_3pid") + self.register_just_use_email_for_display_name = config.get( + "register_just_use_email_for_display_name", False + ) self.bcrypt_rounds = config.get("bcrypt_rounds", 12) self.trusted_third_party_id_servers = config.get( @@ -126,6 +137,18 @@ class RegistrationConfig(Config): raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) + self.disable_set_displayname = config.get("disable_set_displayname", False) + self.disable_set_avatar_url = config.get("disable_set_avatar_url", False) + + self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", []) + if not isinstance(self.replicate_user_profiles_to, list): + self.replicate_user_profiles_to = [self.replicate_user_profiles_to] + + self.shadow_server = config.get("shadow_server", None) + self.rewrite_identity_server_urls = config.get( + "rewrite_identity_server_urls", {} + ) + self.disable_msisdn_registration = config.get( "disable_msisdn_registration", False ) @@ -224,9 +247,32 @@ class RegistrationConfig(Config): # #disable_msisdn_registration: true + # Derive the user's matrix ID from a type of 3PID used when registering. + # This overrides any matrix ID the user proposes when calling /register + # The 3PID type should be present in registrations_require_3pid to avoid + # users failing to register if they don't specify the right kind of 3pid. + # + #register_mxid_from_3pid: email + + # Uncomment to set the display name of new users to their email address, + # rather than using the default heuristic. + # + #register_just_use_email_for_display_name: true + # Mandate that users are only allowed to associate certain formats of # 3PIDs with accounts on this server. # + # Use an Identity Server to establish which 3PIDs are allowed to register? + # Overrides allowed_local_3pids below. + # + #check_is_for_allowed_local_3pids: matrix.org + # + # If you are using an IS you can also check whether that IS registers + # pending invites for the given 3PID (and then allow it to sign up on + # the platform): + # + #allow_invited_3pids: False + # #allowed_local_3pids: # - medium: email # pattern: '.*@matrix\\.org' @@ -235,6 +281,11 @@ class RegistrationConfig(Config): # - medium: msisdn # pattern: '\\+44' + # If true, stop users from trying to change the 3PIDs associated with + # their accounts. + # + #disable_3pid_changes: False + # Enable 3PIDs lookup requests to identity servers from this server. # #enable_3pid_lookup: true @@ -284,6 +335,30 @@ class RegistrationConfig(Config): # - matrix.org # - vector.im + # If enabled, user IDs, display names and avatar URLs will be replicated + # to this server whenever they change. + # This is an experimental API currently implemented by sydent to support + # cross-homeserver user directories. 
+ # + #replicate_user_profiles_to: example.com + + # If specified, attempt to replay registrations, profile changes & 3pid + # bindings on the given target homeserver via the AS API. The HS is authed + # via a given AS token. + # + #shadow_server: + # hs_url: https://shadow.example.com + # hs: shadow.example.com + # as_token: 12u394refgbdhivsia + + # If enabled, don't let users set their own display names/avatars + # other than for the very first time (unless they are a server admin). + # Useful when provisioning users based on the contents of a 3rd party + # directory and to avoid ambiguities. + # + #disable_set_displayname: False + #disable_set_avatar_url: False + # Handle threepid (email/phone etc) registration and password resets through a set of # *trusted* identity servers. Note that this allows the configured identity server to # reset passwords for accounts! diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index d0205e14b9..6072aa56d3 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py
@@ -97,6 +97,12 @@ class ContentRepositoryConfig(Config): self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M")) self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M")) + self.max_avatar_size = config.get("max_avatar_size") + if self.max_avatar_size: + self.max_avatar_size = self.parse_size(self.max_avatar_size) + + self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes", []) + self.media_store_path = self.ensure_directory( config.get("media_store_path", "media_store") ) @@ -239,6 +245,30 @@ class ContentRepositoryConfig(Config): # #max_upload_size: 10M + # The largest allowed size for a user avatar. If not defined, no + # restriction will be imposed. + # + # Note that this only applies when an avatar is changed globally. + # Per-room avatar changes are not affected. See allow_per_room_profiles + # for disabling that functionality. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's local media repo. + # + #max_avatar_size: 10M + + # Allow mimetypes for a user avatar. If not defined, no restriction will + # be imposed. + # + # Note that this only applies when an avatar is changed globally. + # Per-room avatar changes are not affected. See allow_per_room_profiles + # for disabling that functionality. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's local media repo. + # + #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # Maximum number of pixels that will be thumbnailed # #max_image_pixels: 32M diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
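A sketch of the check these two options imply when a user sets a global avatar; the 'media_info' shape (media_length in bytes, media_type as a MIME string) is an assumption for illustration, not Synapse's actual interface:

    def avatar_upload_allowed(config, media_info):
        # Enforce the optional size cap.
        if config.max_avatar_size and media_info.media_length > config.max_avatar_size:
            return False
        # Enforce the optional MIME type allow-list.
        if (
            config.allowed_avatar_mimetypes
            and media_info.media_type not in config.allowed_avatar_mimetypes
        ):
            return False
        return True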
index c5ea2d43a1..b91414aa35 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py
@@ -14,17 +14,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import re +import logging from synapse.python_dependencies import DependencyException, check_requirements -from synapse.types import ( - map_username_to_mxid_localpart, - mxid_localpart_allowed_characters, -) -from synapse.util.module_loader import load_python_module +from synapse.util.module_loader import load_module, load_python_module from ._base import Config, ConfigError +logger = logging.getLogger(__name__) + +DEFAULT_USER_MAPPING_PROVIDER = ( + "synapse.handlers.saml_handler.DefaultSamlMappingProvider" +) + def _dict_merge(merge_dict, into_dict): """Do a deep merge of two dicts @@ -75,15 +77,69 @@ class SAML2Config(Config): self.saml2_enabled = True - self.saml2_mxid_source_attribute = saml2_config.get( - "mxid_source_attribute", "uid" - ) - self.saml2_grandfathered_mxid_source_attribute = saml2_config.get( "grandfathered_mxid_source_attribute", "uid" ) - saml2_config_dict = self._default_saml_config_dict() + # user_mapping_provider may be None if the key is present but has no value + ump_dict = saml2_config.get("user_mapping_provider") or {} + + # Use the default user mapping provider if not set + ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) + + # Ensure a config is present + ump_dict["config"] = ump_dict.get("config") or {} + + if ump_dict["module"] == DEFAULT_USER_MAPPING_PROVIDER: + # Load deprecated options for use by the default module + old_mxid_source_attribute = saml2_config.get("mxid_source_attribute") + if old_mxid_source_attribute: + logger.warning( + "The config option saml2_config.mxid_source_attribute is deprecated. " + "Please use saml2_config.user_mapping_provider.config" + ".mxid_source_attribute instead." + ) + ump_dict["config"]["mxid_source_attribute"] = old_mxid_source_attribute + + old_mxid_mapping = saml2_config.get("mxid_mapping") + if old_mxid_mapping: + logger.warning( + "The config option saml2_config.mxid_mapping is deprecated. Please " + "use saml2_config.user_mapping_provider.config.mxid_mapping instead." + ) + ump_dict["config"]["mxid_mapping"] = old_mxid_mapping + + # Retrieve an instance of the module's class + # Pass the config dictionary to the module for processing + ( + self.saml2_user_mapping_provider_class, + self.saml2_user_mapping_provider_config, + ) = load_module(ump_dict) + + # Ensure loaded user mapping module has defined all necessary methods + # Note parse_config() is already checked during the call to load_module + required_methods = [ + "get_saml_attributes", + "saml_response_to_user_attributes", + ] + missing_methods = [ + method + for method in required_methods + if not hasattr(self.saml2_user_mapping_provider_class, method) + ] + if missing_methods: + raise ConfigError( + "Class specified by saml2_config." 
+ "user_mapping_provider.module is missing required " + "methods: %s" % (", ".join(missing_methods),) + ) + + # Get the desired saml auth response attributes from the module + saml2_config_dict = self._default_saml_config_dict( + *self.saml2_user_mapping_provider_class.get_saml_attributes( + self.saml2_user_mapping_provider_config + ) + ) _dict_merge( merge_dict=saml2_config.get("sp_config", {}), into_dict=saml2_config_dict ) @@ -103,22 +159,27 @@ class SAML2Config(Config): saml2_config.get("saml_session_lifetime", "5m") ) - mapping = saml2_config.get("mxid_mapping", "hexencode") - try: - self.saml2_mxid_mapper = MXID_MAPPER_MAP[mapping] - except KeyError: - raise ConfigError("%s is not a known mxid_mapping" % (mapping,)) - - def _default_saml_config_dict(self): + def _default_saml_config_dict( + self, required_attributes: set, optional_attributes: set + ): + """Generate a configuration dictionary with required and optional attributes that + will be needed to process new user registration + + Args: + required_attributes: SAML auth response attributes that are + necessary to function + optional_attributes: SAML auth response attributes that can be used to add + additional information to Synapse user accounts, but are not required + + Returns: + dict: A SAML configuration dictionary + """ import saml2 public_baseurl = self.public_baseurl if public_baseurl is None: raise ConfigError("saml2_config requires a public_baseurl to be set") - required_attributes = {"uid", self.saml2_mxid_source_attribute} - - optional_attributes = {"displayName"} if self.saml2_grandfathered_mxid_source_attribute: optional_attributes.add(self.saml2_grandfathered_mxid_source_attribute) optional_attributes -= required_attributes @@ -207,33 +268,58 @@ class SAML2Config(Config): # #config_path: "%(config_dir_path)s/sp_conf.py" - # the lifetime of a SAML session. This defines how long a user has to + # The lifetime of a SAML session. This defines how long a user has to # complete the authentication process, if allow_unsolicited is unset. # The default is 5 minutes. # #saml_session_lifetime: 5m - # The SAML attribute (after mapping via the attribute maps) to use to derive - # the Matrix ID from. 'uid' by default. + # An external module can be provided here as a custom solution to + # mapping attributes returned from a saml provider onto a matrix user. # - #mxid_source_attribute: displayName - - # The mapping system to use for mapping the saml attribute onto a matrix ID. - # Options include: - # * 'hexencode' (which maps unpermitted characters to '=xx') - # * 'dotreplace' (which replaces unpermitted characters with '.'). - # The default is 'hexencode'. - # - #mxid_mapping: dotreplace - - # In previous versions of synapse, the mapping from SAML attribute to MXID was - # always calculated dynamically rather than stored in a table. For backwards- - # compatibility, we will look for user_ids matching such a pattern before - # creating a new account. + user_mapping_provider: + # The custom module's class. Uncomment to use a custom module. + # + #module: mapping_provider.SamlMappingProvider + + # Custom configuration values for the module. Below options are + # intended for the built-in provider, they should be changed if + # using a custom module. This section will be passed as a Python + # dictionary to the module's `parse_config` method. + # + config: + # The SAML attribute (after mapping via the attribute maps) to use + # to derive the Matrix ID from. 'uid' by default. 
+ # + # Note: This used to be configured by the + # saml2_config.mxid_source_attribute option. If that is still + # defined, its value will be used instead. + # + #mxid_source_attribute: displayName + + # The mapping system to use for mapping the saml attribute onto a + # matrix ID. + # + # Options include: + # * 'hexencode' (which maps unpermitted characters to '=xx') + # * 'dotreplace' (which replaces unpermitted characters with + # '.'). + # The default is 'hexencode'. + # + # Note: This used to be configured by the + # saml2_config.mxid_mapping option. If that is still defined, its + # value will be used instead. + # + #mxid_mapping: dotreplace + + # In previous versions of synapse, the mapping from SAML attribute to + # MXID was always calculated dynamically rather than stored in a + # table. For backwards- compatibility, we will look for user_ids + # matching such a pattern before creating a new account. # # This setting controls the SAML attribute which will be used for this - # backwards-compatibility lookup. Typically it should be 'uid', but if the - # attribute maps are changed, it may be necessary to change it. + # backwards-compatibility lookup. Typically it should be 'uid', but if + # the attribute maps are changed, it may be necessary to change it. # # The default is 'uid'. # @@ -241,23 +327,3 @@ class SAML2Config(Config): """ % { "config_dir_path": config_dir_path } - - -DOT_REPLACE_PATTERN = re.compile( - ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)) -) - - -def dot_replace_for_mxid(username: str) -> str: - username = username.lower() - username = DOT_REPLACE_PATTERN.sub(".", username) - - # regular mxids aren't allowed to start with an underscore either - username = re.sub("^_", "", username) - return username - - -MXID_MAPPER_MAP = { - "hexencode": map_username_to_mxid_localpart, - "dotreplace": dot_replace_for_mxid, -} diff --git a/synapse/config/server.py b/synapse/config/server.py
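Putting the pieces together, a custom mapping provider only has to expose parse_config (checked inside load_module) plus the two required methods listed above. A sketch against that interface; the attribute names and the return shape of saml_response_to_user_attributes are illustrative:

    import attr

    @attr.s
    class ExampleProviderConfig(object):
        mxid_source_attribute = attr.ib(default="uid")

    class ExampleSamlMappingProvider(object):
        def __init__(self, parsed_config):
            self._config = parsed_config

        @staticmethod
        def parse_config(config):
            # Receives the user_mapping_provider.config dict.
            return ExampleProviderConfig(
                config.get("mxid_source_attribute", "uid")
            )

        @staticmethod
        def get_saml_attributes(config):
            # Returns (required attributes, optional attributes), consumed
            # by _default_saml_config_dict above.
            return {"uid", config.mxid_source_attribute}, {"displayName"}

        def saml_response_to_user_attributes(self, saml_response, failures):
            # Derive Matrix user attributes from the SAML assertion.
            localpart = saml_response.ava[self._config.mxid_source_attribute][0]
            return {"mxid_localpart": localpart.lower(), "displayname": None}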
index a4bef00936..3a8e920bf0 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py
@@ -102,6 +102,12 @@ class ServerConfig(Config): "require_auth_for_profile_requests", False ) + # Whether to require sharing a room with a user to retrieve their + # profile data + self.limit_profile_requests_to_known_users = config.get( + "limit_profile_requests_to_known_users", False + ) + if "restrict_public_rooms_to_local_users" in config and ( "allow_public_rooms_without_auth" in config or "allow_public_rooms_over_federation" in config @@ -200,7 +206,7 @@ class ServerConfig(Config): self.admin_contact = config.get("admin_contact", None) # FIXME: federation_domain_whitelist needs sytests - self.federation_domain_whitelist = None + self.federation_domain_whitelist = None # type: Optional[dict] federation_domain_whitelist = config.get("federation_domain_whitelist", None) if federation_domain_whitelist is not None: @@ -247,6 +253,12 @@ class ServerConfig(Config): # events with profile information that differ from the target's global profile. self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + # Whether to show the users on this homeserver in the user directory. Defaults to + # True. + self.show_users_in_user_directory = config.get( + "show_users_in_user_directory", True + ) + retention_config = config.get("retention") if retention_config is None: retention_config = {} @@ -621,6 +633,13 @@ class ServerConfig(Config): # #require_auth_for_profile_requests: true + # Whether to require a user to share a room with another user in order + # to retrieve their profile information. Only checked on Client-Server + # requests. Profile requests from other servers should be checked by the + # requesting server. Defaults to 'false'. + # + # limit_profile_requests_to_known_users: true + # If set to 'true', removes the need for authentication to access the server's # public rooms directory through the client API, meaning that anyone can # query the room directory. Defaults to 'false'. @@ -871,6 +890,74 @@ class ServerConfig(Config): # #allow_per_room_profiles: false + # Whether to show the users on this homeserver in the user directory. Defaults to + # 'true'. + # + #show_users_in_user_directory: false + + # Message retention policy at the server level. + # + # Room admins and mods can define a retention period for their rooms using the + # 'm.room.retention' state event, and server admins can cap this period by setting + # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. + # + # If this feature is enabled, Synapse will regularly look for and purge events + # which are older than the room's maximum retention period. Synapse will also + # filter events received over federation so that events that should have been + # purged are ignored and not stored again. + # + retention: + # The message retention policies feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # Default retention policy. If set, Synapse will apply it to rooms that lack the + # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't + # matter much because Synapse doesn't take it into account yet. + # + #default_policy: + # min_lifetime: 1d + # max_lifetime: 1y + + # Retention policy limits. If set, a user won't be able to send a + # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' + # that's not within this range. This is especially useful in closed federations, + # in which server admins can make sure every federating server applies the same + # rules. 
+ # + #allowed_lifetime_min: 1d + #allowed_lifetime_max: 1y + + # Server admins can define the settings of the background jobs purging the + # events whose lifetime has expired under the 'purge_jobs' section. + # + # If no configuration is provided, a single job will be set up to delete expired + # events in every room daily. + # + # Each job's configuration defines which range of message lifetimes the job + # takes care of. For example, if 'shortest_max_lifetime' is '2d' and + # 'longest_max_lifetime' is '3d', the job will handle purging expired events in + # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and + # lower than or equal to 3 days. Both the minimum and the maximum value of a + # range are optional, e.g. a job with no 'shortest_max_lifetime' and a + # 'longest_max_lifetime' of '3d' will handle every room with a retention policy + # whose 'max_lifetime' is lower than or equal to three days. + # + # The rationale for this per-job configuration is that some rooms might have a + # retention policy with a low 'max_lifetime', where history needs to be purged + # of outdated messages on a very frequent basis (e.g. every 5min), without + # requiring that purge to be performed by a job that iterates over every room it + # knows, which would be quite heavy on the server. + # + #purge_jobs: + # - shortest_max_lifetime: 1d + # longest_max_lifetime: 3d + # interval: 5m + # - shortest_max_lifetime: 3d + # longest_max_lifetime: 1y + # interval: 24h + # How long to keep redacted events in unredacted form in the database. After + # this period redacted events get replaced with their redacted form in the DB. + #
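The range semantics described above (exclusive lower bound, inclusive upper bound, missing bounds open-ended) can be made concrete with a toy selector; lifetimes are assumed to be already parsed into milliseconds:

    ONE_DAY_MS = 24 * 3600 * 1000

    def job_for_policy(purge_jobs, max_lifetime):
        # Return the first job whose range covers the room's max_lifetime:
        # shortest_max_lifetime < max_lifetime <= longest_max_lifetime.
        for job in purge_jobs:
            lower = job.get("shortest_max_lifetime", 0)
            upper = job.get("longest_max_lifetime", float("inf"))
            if lower < max_lifetime <= upper:
                return job
        return None

    jobs = [
        {"shortest_max_lifetime": ONE_DAY_MS, "longest_max_lifetime": 3 * ONE_DAY_MS},
        {"shortest_max_lifetime": 3 * ONE_DAY_MS},
    ]
    # A room with a 2-day max_lifetime is handled by the first job.
    assert job_for_policy(jobs, 2 * ONE_DAY_MS) is jobs[0]

diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py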
index c8d19c5d6b..43b6c40456 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py
@@ -26,6 +26,7 @@ class UserDirectoryConfig(Config): def read_config(self, config, **kwargs): self.user_directory_search_enabled = True self.user_directory_search_all_users = False + self.user_directory_defer_to_id_server = None user_directory_config = config.get("user_directory", None) if user_directory_config: self.user_directory_search_enabled = user_directory_config.get( @@ -34,6 +35,9 @@ class UserDirectoryConfig(Config): self.user_directory_search_all_users = user_directory_config.get( "search_all_users", False ) + self.user_directory_defer_to_id_server = user_directory_config.get( + "defer_to_id_server", None + ) def generate_config_section(self, config_dir_path, server_name, **kwargs): return """ @@ -52,4 +56,9 @@ class UserDirectoryConfig(Config): #user_directory: # enabled: true # search_all_users: false + # + # # If this is set, user search will be delegated to this ID server instead + # # of synapse performing the search itself. + # # This is an experimental API. + # defer_to_id_server: https://id.example.com """ diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index d184b0273b..80ec911b3d 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py
@@ -42,6 +42,8 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru Returns: if the auth checks pass. """ + assert isinstance(auth_events, dict) + if do_size_check: _check_size_limits(event) @@ -86,12 +88,6 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru if not event.signatures.get(event_id_domain): raise AuthError(403, "Event not signed by sending server") - if auth_events is None: - # Oh, we don't know what the state of the room was, so we - # are trusting that this is allowed (at least for now) - logger.warning("Trusting event: %s", event.event_id) - return - if event.type == EventTypes.Create: sender_domain = get_domain_from_id(event.sender) room_id_domain = get_domain_from_id(event.room_id) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 5a907718d6..b7a31c9a7d 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py
@@ -58,13 +58,33 @@ class SpamChecker(object): return self.spam_checker.check_event_for_spam(event) - def user_may_invite(self, inviter_userid, invitee_userid, room_id): + def user_may_invite( + self, + inviter_userid, + invitee_userid, + third_party_invite, + room_id, + new_room, + published_room, + ): """Checks if a given user may send an invite If this method returns false, the invite will be rejected. Args: - userid (string): The sender's user ID + inviter_userid (str) + invitee_userid (str|None): The user ID of the invitee. Is None + if this is a third party invite and the 3PID is not bound to a + user ID. + third_party_invite (dict|None): If this is a third party invite, a + dict containing the medium and address of the invitee. + room_id (str) + new_room (bool): Whether the user is being invited to the room as + part of a room creation; if so, the invitee will have been + included in the call to `user_may_create_room`. + published_room (bool): Whether the room the user is being invited + to has been published in the local homeserver's public room + directory. Returns: bool: True if the user may send an invite, otherwise False @@ -73,16 +93,29 @@ class SpamChecker(object): return True return self.spam_checker.user_may_invite( - inviter_userid, invitee_userid, room_id + inviter_userid, + invitee_userid, + third_party_invite, + room_id, + new_room, + published_room, ) - def user_may_create_room(self, userid): + def user_may_create_room( + self, userid, invite_list, third_party_invite_list, cloning + ): """Checks if a given user may create a room If this method returns false, the creation request will be rejected. Args: userid (string): The sender's user ID + invite_list (list[str]): List of user IDs that would be invited to + the new room. + third_party_invite_list (list[dict]): List of third party invites + for the new room. + cloning (bool): Whether the user is cloning an existing room, e.g. + upgrading a room. Returns: bool: True if the user may create a room, otherwise False @@ -90,7 +123,9 @@ class SpamChecker(object): if self.spam_checker is None: return True - return self.spam_checker.user_may_create_room(userid) + return self.spam_checker.user_may_create_room( + userid, invite_list, third_party_invite_list, cloning + ) def user_may_create_room_alias(self, userid, room_alias): """Checks if a given user may create a room alias @@ -125,3 +160,21 @@ class SpamChecker(object): return True return self.spam_checker.user_may_publish_room(userid, room_id) + + def user_may_join_room(self, userid, room_id, is_invited): + """Checks if a given user is allowed to join a room. + + Is not called when the user creates a room. + + Args: + userid (str) + room_id (str) + is_invited (bool): Whether the user is invited into the room + + Returns: + bool: Whether the user may join the room + """ + if self.spam_checker is None: + return True + + return self.spam_checker.user_may_join_room(userid, room_id, is_invited) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
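To illustrate the widened interface, a minimal pluggable module satisfying the callbacks that SpamChecker now forwards to might look like the sketch below; the specific rules are arbitrary examples, not recommendations:

class ExampleSpamChecker(object):
    def check_event_for_spam(self, event):
        return False  # nothing is spam

    def user_may_invite(
        self, inviter_userid, invitee_userid, third_party_invite,
        room_id, new_room, published_room,
    ):
        # Always allow invites issued as part of room creation; otherwise
        # block third party invites into published rooms (example rule).
        if new_room:
            return True
        return not (published_room and third_party_invite is not None)

    def user_may_create_room(
        self, userid, invite_list, third_party_invite_list, cloning
    ):
        # Room upgrades (cloning) always pass; cap invites otherwise.
        return cloning or len(invite_list) + len(third_party_invite_list) < 100

    def user_may_create_room_alias(self, userid, room_alias):
        return True

    def user_may_publish_room(self, userid, room_id):
        return True

    def user_may_join_room(self, userid, room_id, is_invited):
        # Example rule: some rooms may only be joined via an invite.
        return is_invited or not room_id.startswith("!invite-only")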
index d396e6564f..af652a7659 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py
@@ -526,13 +526,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_request(destination): - time_now = self._clock.time_msec() - _, content = yield self.transport_layer.send_join( - destination=destination, - room_id=pdu.room_id, - event_id=pdu.event_id, - content=pdu.get_pdu_json(time_now), - ) + content = yield self._do_send_join(destination, pdu) logger.debug("Got content: %s", content) @@ -600,6 +594,44 @@ class FederationClient(FederationBase): return self._try_destination_list("send_join", destinations, send_request) @defer.inlineCallbacks + def _do_send_join(self, destination, pdu): + time_now = self._clock.time_msec() + + try: + content = yield self.transport_layer.send_join_v2( + destination=destination, + room_id=pdu.room_id, + event_id=pdu.event_id, + content=pdu.get_pdu_json(time_now), + ) + + return content + except HttpResponseException as e: + if e.code in [400, 404]: + err = e.to_synapse_error() + + # If we receive an error response that isn't a generic error, or an + # unrecognised endpoint error, we assume that the remote understands + # the v2 send_join API and this is a legitimate error. + if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]: + raise err + else: + raise e.to_synapse_error() + + logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") + + resp = yield self.transport_layer.send_join_v1( + destination=destination, + room_id=pdu.room_id, + event_id=pdu.event_id, + content=pdu.get_pdu_json(time_now), + ) + + # We expect the v1 API to respond with [200, content], so we only return the + # content. + return resp[1] + + @defer.inlineCallbacks def send_invite(self, destination, room_id, event_id, pdu): room_version = yield self.store.get_room_version(room_id) @@ -708,18 +740,50 @@ class FederationClient(FederationBase): @defer.inlineCallbacks def send_request(destination): - time_now = self._clock.time_msec() - _, content = yield self.transport_layer.send_leave( + content = yield self._do_send_leave(destination, pdu) + + logger.debug("Got content: %s", content) + return None + + return self._try_destination_list("send_leave", destinations, send_request) + + @defer.inlineCallbacks + def _do_send_leave(self, destination, pdu): + time_now = self._clock.time_msec() + + try: + content = yield self.transport_layer.send_leave_v2( destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), ) - logger.debug("Got content: %s", content) - return None + return content + except HttpResponseException as e: + if e.code in [400, 404]: + err = e.to_synapse_error() - return self._try_destination_list("send_leave", destinations, send_request) + # If we receive an error response that isn't a generic error, or an + # unrecognised endpoint error, we assume that the remote understands + # the v2 send_leave API and this is a legitimate error. + if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]: + raise err + else: + raise e.to_synapse_error() + + logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API") + + resp = yield self.transport_layer.send_leave_v1( + destination=destination, + room_id=pdu.room_id, + event_id=pdu.event_id, + content=pdu.get_pdu_json(time_now), + ) + + # We expect the v1 API to respond with [200, content], so we only return the + # content. + return resp[1] def get_public_rooms( self, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
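Stripped of the Twisted machinery, _do_send_join and _do_send_leave above share one pattern: try the v2 endpoint, and fall back to v1 only when the error plausibly means "endpoint not implemented". A condensed sketch (EndpointError, send_v2 and send_v1 are stand-ins, not real Synapse names):

UNKNOWN = "M_UNKNOWN"
UNRECOGNIZED = "M_UNRECOGNIZED"

class EndpointError(Exception):
    def __init__(self, code, errcode):
        self.code = code        # HTTP status
        self.errcode = errcode  # Matrix error code

def send_with_fallback(send_v2, send_v1):
    try:
        return send_v2()
    except EndpointError as e:
        # Only a 400/404 with a generic or unrecognised-endpoint errcode
        # suggests the remote doesn't implement v2; anything else is a
        # legitimate error from a v2-aware server and is re-raised.
        if e.code not in (400, 404) or e.errcode not in (UNKNOWN, UNRECOGNIZED):
            raise
    # The v1 API responds with a [200, content] pair, hence the [1].
    return send_v1()[1]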
index 84d4eca041..d7ce333822 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py
@@ -384,15 +384,10 @@ class FederationServer(FederationBase): res_pdus = await self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() - return ( - 200, - { - "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], - "auth_chain": [ - p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] - ], - }, - ) + return { + "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], + "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]], + } async def on_make_leave_request(self, origin, room_id, user_id): origin_host, _ = parse_server_name(origin) @@ -419,7 +414,7 @@ class FederationServer(FederationBase): pdu = await self._check_sigs_and_hash(room_version, pdu) await self.handler.on_send_leave_request(origin, pdu) - return 200, {} + return {} async def on_event_auth(self, origin, room_id, event_id): with (await self._server_linearizer.queue((origin, room_id))): diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 46dba84cac..198257414b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py
@@ -243,7 +243,7 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function - def send_join(self, destination, room_id, event_id, content): + def send_join_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/send_join/%s/%s", room_id, event_id) response = yield self.client.put_json( @@ -254,7 +254,18 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function - def send_leave(self, destination, room_id, event_id, content): + def send_join_v2(self, destination, room_id, event_id, content): + path = _create_v2_path("/send_join/%s/%s", room_id, event_id) + + response = yield self.client.put_json( + destination=destination, path=path, data=content + ) + + return response + + @defer.inlineCallbacks + @log_function + def send_leave_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/send_leave/%s/%s", room_id, event_id) response = yield self.client.put_json( @@ -272,6 +283,24 @@ class TransportLayerClient(object): @defer.inlineCallbacks @log_function + def send_leave_v2(self, destination, room_id, event_id, content): + path = _create_v2_path("/send_leave/%s/%s", room_id, event_id) + + response = yield self.client.put_json( + destination=destination, + path=path, + data=content, + # we want to do our best to send this through. The problem is + # that if it fails, we won't retry it later, so if the remote + # server was just having a momentary blip, the room will be out of + # sync. + ignore_backoff=True, + ) + + return response + + @defer.inlineCallbacks + @log_function def send_invite_v1(self, destination, room_id, event_id, content): path = _create_v1_path("/invite/%s/%s", room_id, event_id) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index fefc789c85..b4cbf23394 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py
@@ -506,11 +506,21 @@ class FederationMakeLeaveServlet(BaseFederationServlet): return 200, content -class FederationSendLeaveServlet(BaseFederationServlet): +class FederationV1SendLeaveServlet(BaseFederationServlet): PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)" async def on_PUT(self, origin, content, query, room_id, event_id): content = await self.handler.on_send_leave_request(origin, content, room_id) + return 200, (200, content) + + +class FederationV2SendLeaveServlet(BaseFederationServlet): + PATH = "/send_leave/(?P<room_id>[^/]*)/(?P<event_id>[^/]*)" + + PREFIX = FEDERATION_V2_PREFIX + + async def on_PUT(self, origin, content, query, room_id, event_id): + content = await self.handler.on_send_leave_request(origin, content, room_id) return 200, content @@ -521,9 +531,21 @@ class FederationEventAuthServlet(BaseFederationServlet): return await self.handler.on_event_auth(origin, context, event_id) -class FederationSendJoinServlet(BaseFederationServlet): +class FederationV1SendJoinServlet(BaseFederationServlet): + PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)" + + async def on_PUT(self, origin, content, query, context, event_id): + # TODO(paul): assert that context/event_id parsed from path actually + # match those given in content + content = await self.handler.on_send_join_request(origin, content, context) + return 200, (200, content) + + +class FederationV2SendJoinServlet(BaseFederationServlet): PATH = "/send_join/(?P<context>[^/]*)/(?P<event_id>[^/]*)" + PREFIX = FEDERATION_V2_PREFIX + async def on_PUT(self, origin, content, query, context, event_id): # TODO(paul): assert that context/event_id parsed from path actually # match those given in content @@ -1367,8 +1389,10 @@ FEDERATION_SERVLET_CLASSES = ( FederationMakeJoinServlet, FederationMakeLeaveServlet, FederationEventServlet, - FederationSendJoinServlet, - FederationSendLeaveServlet, + FederationV1SendJoinServlet, + FederationV2SendJoinServlet, + FederationV1SendLeaveServlet, + FederationV2SendLeaveServlet, FederationV1InviteServlet, FederationV2InviteServlet, FederationQueryAuthServlet, diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
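The parallel servlet classes above exist purely because of the response envelope: the legacy v1 endpoints wrap the content in an extra [status, content] pair, while v2 returns the content alone. Schematically (illustrative values only):

content = {"state": [], "auth_chain": []}  # what the handler produced

v1_body = [200, content]  # v1 wire format: a legacy [status, content] pair
v2_body = content         # v2 wire format: just the content object

# Hence the v1 servlets return (200, (200, content)) -- HTTP status plus
# the legacy pair -- while the v2 servlets return (200, content).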
index 29e8ffc295..0ec9be3cb5 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py
@@ -773,6 +773,11 @@ class GroupsServerHandler(object): if not self.hs.is_mine_id(user_id): yield self.store.maybe_delete_remote_profile_cache(user_id) + # Delete group if the last user has left + users = yield self.store.get_users_in_group(group_id, include_private=True) + if not users: + yield self.store.delete_group(group_id) + return {} @defer.inlineCallbacks diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
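The groups change above is a small check-then-delete guard run after a member leaves; in outline (a sketch with a stand-in store object mirroring the calls in the diff):

def on_user_left_group(store, group_id):
    # If that was the last member (including private ones), the group
    # has no reason to live on, so remove it entirely.
    users = store.get_users_in_group(group_id, include_private=True)
    if not users:
        store.delete_group(group_id)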
index 2d7e6df6e4..20ec1ca01b 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py
@@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer - class AccountDataEventSource(object): def __init__(self, hs): @@ -23,15 +21,14 @@ class AccountDataEventSource(object): def get_current_key(self, direction="f"): return self.store.get_max_account_data_stream_id() - @defer.inlineCallbacks - def get_new_events(self, user, from_key, **kwargs): + async def get_new_events(self, user, from_key, **kwargs): user_id = user.to_string() last_stream_id = from_key - current_stream_id = yield self.store.get_max_account_data_stream_id() + current_stream_id = self.store.get_max_account_data_stream_id() results = [] - tags = yield self.store.get_updated_tags(user_id, last_stream_id) + tags = await self.store.get_updated_tags(user_id, last_stream_id) for room_id, room_tags in tags.items(): results.append( @@ -41,7 +38,7 @@ class AccountDataEventSource(object): ( account_data, room_account_data, - ) = yield self.store.get_updated_account_data_for_user(user_id, last_stream_id) + ) = await self.store.get_updated_account_data_for_user(user_id, last_stream_id) for account_data_type, content in account_data.items(): results.append({"type": account_data_type, "content": content}) @@ -54,6 +51,5 @@ class AccountDataEventSource(object): return results, current_stream_id - @defer.inlineCallbacks - def get_pagination_rows(self, user, config, key): + async def get_pagination_rows(self, user, config, key): return [], config.to_id diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index d04e0fe576..6c46c995d2 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py
@@ -18,6 +18,7 @@ import email.utils import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from typing import List from twisted.internet import defer @@ -44,6 +45,8 @@ class AccountValidityHandler(object): self.clock = self.hs.get_clock() self._account_validity = self.hs.config.account_validity + self._show_users_in_user_directory = self.hs.config.show_users_in_user_directory + self.profile_handler = self.hs.get_profile_handler() if self._account_validity.renew_by_email_enabled and load_jinja2_templates: # Don't do email-specific configuration if renewal by email is disabled. @@ -78,42 +81,42 @@ class AccountValidityHandler(object): # run as a background process to make sure that the database transactions # have a logcontext to report to return run_as_background_process( - "send_renewals", self.send_renewal_emails + "send_renewals", self._send_renewal_emails ) self.clock.looping_call(send_emails, 30 * 60 * 1000) - @defer.inlineCallbacks - def send_renewal_emails(self): + # Check every hour to remove expired users from the user directory + self.clock.looping_call(self._mark_expired_users_as_inactive, 60 * 60 * 1000) + + async def _send_renewal_emails(self): """Gets the list of users whose account is expiring in the amount of time configured in the ``renew_at`` parameter from the ``account_validity`` configuration, and sends renewal emails to all of these users as long as they have an email 3PID attached to their account. """ - expiring_users = yield self.store.get_users_expiring_soon() + expiring_users = await self.store.get_users_expiring_soon() if expiring_users: for user in expiring_users: - yield self._send_renewal_email( + await self._send_renewal_email( user_id=user["user_id"], expiration_ts=user["expiration_ts_ms"] ) - @defer.inlineCallbacks - def send_renewal_email_to_user(self, user_id): - expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) - yield self._send_renewal_email(user_id, expiration_ts) + async def send_renewal_email_to_user(self, user_id: str): + expiration_ts = await self.store.get_expiration_ts_for_user(user_id) + await self._send_renewal_email(user_id, expiration_ts) - @defer.inlineCallbacks - def _send_renewal_email(self, user_id, expiration_ts): + async def _send_renewal_email(self, user_id: str, expiration_ts: int): """Sends out a renewal email to every email address attached to the given user with a unique link allowing them to renew their account. Args: - user_id (str): ID of the user to send email(s) to. - expiration_ts (int): Timestamp in milliseconds for the expiration date of + user_id: ID of the user to send email(s) to. + expiration_ts: Timestamp in milliseconds for the expiration date of this user's account (used in the email templates). """ - addresses = yield self._get_email_addresses_for_user(user_id) + addresses = await self._get_email_addresses_for_user(user_id) # Stop right here if the user doesn't have at least one email address. 
# In this case, they will have to ask their server admin to renew their @@ -125,7 +128,7 @@ class AccountValidityHandler(object): return try: - user_display_name = yield self.store.get_profile_displayname( + user_display_name = await self.store.get_profile_displayname( UserID.from_string(user_id).localpart ) if user_display_name is None: @@ -133,7 +136,7 @@ class AccountValidityHandler(object): except StoreError: user_display_name = user_id - renewal_token = yield self._get_renewal_token(user_id) + renewal_token = await self._get_renewal_token(user_id) url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % ( self.hs.config.public_baseurl, renewal_token, @@ -165,7 +168,7 @@ class AccountValidityHandler(object): logger.info("Sending renewal email to %s", address) - yield make_deferred_yieldable( + await make_deferred_yieldable( self.sendmail( self.hs.config.email_smtp_host, self._raw_from, @@ -180,19 +183,18 @@ class AccountValidityHandler(object): ) ) - yield self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) + await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) - @defer.inlineCallbacks - def _get_email_addresses_for_user(self, user_id): + async def _get_email_addresses_for_user(self, user_id: str) -> List[str]: """Retrieve the list of email addresses attached to a user's account. Args: - user_id (str): ID of the user to lookup email addresses for. + user_id: ID of the user to lookup email addresses for. Returns: - defer.Deferred[list[str]]: Email addresses for this account. + Email addresses for this account. """ - threepids = yield self.store.user_get_threepids(user_id) + threepids = await self.store.user_get_threepids(user_id) addresses = [] for threepid in threepids: @@ -201,16 +203,15 @@ class AccountValidityHandler(object): return addresses - @defer.inlineCallbacks - def _get_renewal_token(self, user_id): + async def _get_renewal_token(self, user_id: str) -> str: """Generates a 32-byte long random string that will be inserted into the user's renewal email's unique link, then saves it into the database. Args: - user_id (str): ID of the user to generate a string for. + user_id: ID of the user to generate a string for. Returns: - defer.Deferred[str]: The generated string. + The generated string. Raises: StoreError(500): Couldn't generate a unique string after 5 attempts. @@ -219,53 +220,76 @@ class AccountValidityHandler(object): while attempts < 5: try: renewal_token = stringutils.random_string(32) - yield self.store.set_renewal_token_for_user(user_id, renewal_token) + await self.store.set_renewal_token_for_user(user_id, renewal_token) return renewal_token except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") - @defer.inlineCallbacks - def renew_account(self, renewal_token): + async def renew_account(self, renewal_token: str) -> bool: """Renews the account attached to a given renewal token by pushing back the expiration date by the current validity period in the server's configuration. Args: - renewal_token (str): Token sent with the renewal request. + renewal_token: Token sent with the renewal request. Returns: - bool: Whether the provided token is valid. + Whether the provided token is valid. 
""" try: - user_id = yield self.store.get_user_from_renewal_token(renewal_token) + user_id = await self.store.get_user_from_renewal_token(renewal_token) except StoreError: - defer.returnValue(False) + return False logger.debug("Renewing an account for user %s", user_id) - yield self.renew_account_for_user(user_id) + await self.renew_account_for_user(user_id) - defer.returnValue(True) + return True - @defer.inlineCallbacks - def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False): + async def renew_account_for_user( + self, user_id: str, expiration_ts: int = None, email_sent: bool = False + ) -> int: """Renews the account attached to a given user by pushing back the expiration date by the current validity period in the server's configuration. Args: - renewal_token (str): Token sent with the renewal request. - expiration_ts (int): New expiration date. Defaults to now + validity period. - email_sent (bool): Whether an email has been sent for this validity period. + renewal_token: Token sent with the renewal request. + expiration_ts: New expiration date. Defaults to now + validity period. + email_sen: Whether an email has been sent for this validity period. Defaults to False. Returns: - defer.Deferred[int]: New expiration date for this account, as a timestamp - in milliseconds since epoch. + New expiration date for this account, as a timestamp in + milliseconds since epoch. """ if expiration_ts is None: expiration_ts = self.clock.time_msec() + self._account_validity.period - yield self.store.set_account_validity_for_user( + await self.store.set_account_validity_for_user( user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent ) + # Check if renewed users should be reintroduced to the user directory + if self._show_users_in_user_directory: + # Show the user in the directory again by setting them to active + await self.profile_handler.set_active( + UserID.from_string(user_id), True, True + ) + return expiration_ts + + @defer.inlineCallbacks + def _mark_expired_users_as_inactive(self): + """Iterate over expired users. Mark them as inactive in order to hide them from the + user directory. + + Returns: + Deferred + """ + # Get expired users + expired_user_ids = yield self.store.get_expired_users() + expired_users = [UserID.from_string(user_id) for user_id in expired_user_ids] + + # Mark each one as non-active + for user in expired_users: + yield self.profile_handler.set_active(user, False, True) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 6dedaaff8d..09bf76038e 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py
@@ -35,6 +35,7 @@ class DeactivateAccountHandler(BaseHandler): self._device_handler = hs.get_device_handler() self._room_member_handler = hs.get_room_member_handler() self._identity_handler = hs.get_handlers().identity_handler + self._profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() # Flag that indicates whether the process to part users from rooms is running @@ -107,6 +108,9 @@ class DeactivateAccountHandler(BaseHandler): yield self.store.user_set_password_hash(user_id, None) + user = UserID.from_string(user_id) + yield self._profile_handler.set_active(user, False, False) + # Add the user to a table of users pending deactivation (ie. # removal from all the rooms they're a member of) yield self.store.add_user_pending_deactivation(user_id) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 57a10daefd..2d889364d4 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py
@@ -264,6 +264,7 @@ class E2eKeysHandler(object): return ret + @defer.inlineCallbacks def get_cross_signing_keys_from_cache(self, query, from_user_id): """Get cross-signing keys for users from the database @@ -283,14 +284,32 @@ class E2eKeysHandler(object): self_signing_keys = {} user_signing_keys = {} - # Currently a stub, implementation coming in https://github.com/matrix-org/synapse/pull/6486 - return defer.succeed( - { - "master_keys": master_keys, - "self_signing_keys": self_signing_keys, - "user_signing_keys": user_signing_keys, - } - ) + user_ids = list(query) + + keys = yield self.store.get_e2e_cross_signing_keys_bulk(user_ids, from_user_id) + + for user_id, user_info in keys.items(): + if user_info is None: + continue + if "master" in user_info: + master_keys[user_id] = user_info["master"] + if "self_signing" in user_info: + self_signing_keys[user_id] = user_info["self_signing"] + + if ( + from_user_id in keys + and keys[from_user_id] is not None + and "user_signing" in keys[from_user_id] + ): + # users can see other users' master and self-signing keys, but can + # only see their own user-signing keys + user_signing_keys[from_user_id] = keys[from_user_id]["user_signing"] + + return { + "master_keys": master_keys, + "self_signing_keys": self_signing_keys, + "user_signing_keys": user_signing_keys, + } @trace @defer.inlineCallbacks diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
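Given the visibility rule in the new implementation (anyone may see master and self-signing keys, but a user-signing key is only shown to its owner), a query by @alice:example.com covering herself and @bob:example.com would yield a result shaped roughly like this (illustrative, with abridged key objects):

result = {
    "master_keys": {
        "@alice:example.com": {"keys": {"ed25519:alice_msk": "..."}},
        "@bob:example.com": {"keys": {"ed25519:bob_msk": "..."}},
    },
    "self_signing_keys": {
        "@alice:example.com": {"keys": {"ed25519:alice_ssk": "..."}},
        "@bob:example.com": {"keys": {"ed25519:bob_ssk": "..."}},
    },
    # User-signing keys are only ever returned for the requesting user.
    "user_signing_keys": {
        "@alice:example.com": {"keys": {"ed25519:alice_usk": "..."}},
    },
}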
index abe02907b9..623a1f0801 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py
@@ -19,7 +19,7 @@ import itertools import logging -from typing import Dict, Iterable, Optional, Sequence, Tuple +from typing import Dict, Iterable, List, Optional, Sequence, Tuple import six from six import iteritems, itervalues @@ -63,6 +63,7 @@ from synapse.replication.http.federation import ( ) from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet from synapse.state import StateResolutionStore, resolve_events_with_store +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.types import UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.distributor import user_joined_room @@ -163,8 +164,7 @@ class FederationHandler(BaseHandler): self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages - @defer.inlineCallbacks - def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False): + async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None: """ Process a PDU received via a federation /send/ transaction, or via backfill of missing prev_events @@ -174,17 +174,15 @@ class FederationHandler(BaseHandler): pdu (FrozenEvent): received PDU sent_to_us_directly (bool): True if this event was pushed to us; False if we pulled it as the result of a missing prev_event. - - Returns (Deferred): completes with None """ room_id = pdu.room_id event_id = pdu.event_id - logger.info("[%s %s] handling received PDU: %s", room_id, event_id, pdu) + logger.info("handling received PDU: %s", pdu) # We reprocess pdus when we have seen them only as outliers - existing = yield self.store.get_event( + existing = await self.store.get_event( event_id, allow_none=True, allow_rejected=True ) @@ -228,7 +226,7 @@ class FederationHandler(BaseHandler): # # Note that if we were never in the room then we would have already # dropped the event, since we wouldn't know the room version. - is_in_room = yield self.auth.check_host_in_room(room_id, self.server_name) + is_in_room = await self.auth.check_host_in_room(room_id, self.server_name) if not is_in_room: logger.info( "[%s %s] Ignoring PDU from %s as we're not in the room", @@ -243,12 +241,12 @@ class FederationHandler(BaseHandler): # Get missing pdus if necessary. if not pdu.internal_metadata.is_outlier(): # We only backfill backwards to the min depth. 
- min_depth = yield self.get_min_depth_for_context(pdu.room_id) + min_depth = await self.get_min_depth_for_context(pdu.room_id) logger.debug("[%s %s] min_depth: %d", room_id, event_id, min_depth) prevs = set(pdu.prev_event_ids()) - seen = yield self.store.have_seen_events(prevs) + seen = await self.store.have_seen_events(prevs) if min_depth and pdu.depth < min_depth: # This is so that we don't notify the user about this @@ -268,7 +266,7 @@ class FederationHandler(BaseHandler): len(missing_prevs), shortstr(missing_prevs), ) - with (yield self._room_pdu_linearizer.queue(pdu.room_id)): + with (await self._room_pdu_linearizer.queue(pdu.room_id)): logger.info( "[%s %s] Acquired room lock to fetch %d missing prev_events", room_id, @@ -276,13 +274,19 @@ class FederationHandler(BaseHandler): len(missing_prevs), ) - yield self._get_missing_events_for_pdu( - origin, pdu, prevs, min_depth - ) + try: + await self._get_missing_events_for_pdu( + origin, pdu, prevs, min_depth + ) + except Exception as e: + raise Exception( + "Error fetching missing prev_events for %s: %s" + % (event_id, e) + ) # Update the set of things we've seen after trying to # fetch the missing stuff - seen = yield self.store.have_seen_events(prevs) + seen = await self.store.have_seen_events(prevs) if not prevs - seen: logger.info( @@ -290,14 +294,6 @@ class FederationHandler(BaseHandler): room_id, event_id, ) - elif missing_prevs: - logger.info( - "[%s %s] Not recursively fetching %d missing prev_events: %s", - room_id, - event_id, - len(missing_prevs), - shortstr(missing_prevs), - ) if prevs - seen: # We've still not been able to get all of the prev_events for this event. @@ -347,7 +343,7 @@ class FederationHandler(BaseHandler): event_map = {event_id: pdu} try: # Get the state of the events we know about - ours = yield self.state_store.get_state_groups_ids(room_id, seen) + ours = await self.state_store.get_state_groups_ids(room_id, seen) # state_maps is a list of mappings from (type, state_key) to event_id state_maps = list( @@ -371,7 +367,7 @@ class FederationHandler(BaseHandler): # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. - (remote_state, _,) = yield self._get_state_for_room( + (remote_state, _,) = await self._get_state_for_room( origin, room_id, p, include_event_in_state=True ) @@ -383,8 +379,8 @@ class FederationHandler(BaseHandler): for x in remote_state: event_map[x.event_id] = x - room_version = yield self.store.get_room_version(room_id) - state_map = yield resolve_events_with_store( + room_version = await self.store.get_room_version(room_id) + state_map = await resolve_events_with_store( room_id, room_version, state_maps, @@ -397,7 +393,7 @@ class FederationHandler(BaseHandler): # First though we need to fetch all the events that are in # state_map, so we can build up the state below. - evs = yield self.store.get_events( + evs = await self.store.get_events( list(state_map.values()), get_prev_content=False, check_redacted=False, @@ -420,10 +416,9 @@ class FederationHandler(BaseHandler): affected=event_id, ) - yield self._process_received_pdu(origin, pdu, state=state) + await self._process_received_pdu(origin, pdu, state=state) - @defer.inlineCallbacks - def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): + async def _get_missing_events_for_pdu(self, origin, pdu, prevs, min_depth): """ Args: origin (str): Origin of the pdu. 
Will be called to get the missing events @@ -435,12 +430,12 @@ room_id = pdu.room_id event_id = pdu.event_id - seen = yield self.store.have_seen_events(prevs) + seen = await self.store.have_seen_events(prevs) if not prevs - seen: return - latest = yield self.store.get_latest_event_ids_in_room(room_id) + latest = await self.store.get_latest_event_ids_in_room(room_id) # We add the prev events that we have seen to the latest # list to ensure the remote server doesn't give them to us @@ -504,7 +499,7 @@ # All that said: Let's try increasing the timeout to 60s and see what happens. try: - missing_events = yield self.federation_client.get_missing_events( + missing_events = await self.federation_client.get_missing_events( origin, room_id, earliest_events_ids=list(latest), @@ -543,7 +538,7 @@ ) with nested_logging_context(ev.event_id): try: - yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False) + await self.on_receive_pdu(origin, ev, sent_to_us_directly=False) except FederationError as e: if e.code == 403: logger.warning( @@ -737,8 +732,7 @@ yield self.user_joined_room(user, room_id) @log_function - @defer.inlineCallbacks - def backfill(self, dest, room_id, limit, extremities): + async def backfill(self, dest, room_id, limit, extremities): """ Trigger a backfill request to `dest` for the given `room_id` This will attempt to get more events from the remote. If the other side @@ -755,7 +749,7 @@ if dest == self.server_name: raise SynapseError(400, "Can't backfill from self.") - events = yield self.federation_client.backfill( + events = await self.federation_client.backfill( dest, room_id, limit=limit, extremities=extremities ) @@ -770,7 +764,7 @@ # self._sanity_check_event(ev) # Don't bother processing events we already have. - seen_events = yield self.store.have_events_in_timeline( + seen_events = await self.store.have_events_in_timeline( set(e.event_id for e in events) ) @@ -796,7 +790,7 @@ state_events = {} events_to_state = {} for e_id in edges: - state, auth = yield self._get_state_for_room( + state, auth = await self._get_state_for_room( destination=dest, room_id=room_id, event_id=e_id ) auth_events.update({a.event_id: a for a in auth}) @@ -840,7 +834,7 @@ ) ) - yield self._handle_new_events(dest, ev_infos, backfilled=True) + await self._handle_new_events(dest, ev_infos, backfilled=True) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -856,16 +850,15 @@ # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. - yield self._handle_new_event(dest, event, backfilled=True) + await self._handle_new_event(dest, event, backfilled=True) return events - @defer.inlineCallbacks - def maybe_backfill(self, room_id, current_depth): + async def maybe_backfill(self, room_id, current_depth): """Checks the database to see if we should backfill before paginating, and if so, does so. 
""" - extremities = yield self.store.get_oldest_events_with_depth_in_room(room_id) + extremities = await self.store.get_oldest_events_with_depth_in_room(room_id) if not extremities: logger.debug("Not backfilling as no extremeties found.") @@ -897,15 +890,17 @@ class FederationHandler(BaseHandler): # state *before* the event, ignoring the special casing certain event # types have. - forward_events = yield self.store.get_successor_events(list(extremities)) + forward_events = await self.store.get_successor_events(list(extremities)) - extremities_events = yield self.store.get_events( - forward_events, check_redacted=False, get_prev_content=False + extremities_events = await self.store.get_events( + forward_events, + redact_behaviour=EventRedactBehaviour.AS_IS, + get_prev_content=False, ) # We set `check_history_visibility_only` as we might otherwise get false # positives from users having been erased. - filtered_extremities = yield filter_events_for_server( + filtered_extremities = await filter_events_for_server( self.storage, self.server_name, list(extremities_events.values()), @@ -935,7 +930,7 @@ class FederationHandler(BaseHandler): # First we try hosts that are already in the room # TODO: HEURISTIC ALERT. - curr_state = yield self.state_handler.get_current_state(room_id) + curr_state = await self.state_handler.get_current_state(room_id) def get_domains_from_state(state): """Get joined domains from state @@ -974,12 +969,11 @@ class FederationHandler(BaseHandler): domain for domain, depth in curr_domains if domain != self.server_name ] - @defer.inlineCallbacks - def try_backfill(domains): + async def try_backfill(domains): # TODO: Should we try multiple of these at a time? for dom in domains: try: - yield self.backfill( + await self.backfill( dom, room_id, limit=100, extremities=extremities ) # If this succeeded then we probably already have the @@ -1010,7 +1004,7 @@ class FederationHandler(BaseHandler): return False - success = yield try_backfill(likely_domains) + success = await try_backfill(likely_domains) if success: return True @@ -1024,7 +1018,7 @@ class FederationHandler(BaseHandler): logger.debug("calling resolve_state_groups in _maybe_backfill") resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) - states = yield make_deferred_yieldable( + states = await make_deferred_yieldable( defer.gatherResults( [resolve(room_id, [e]) for e in event_ids], consumeErrors=True ) @@ -1034,7 +1028,7 @@ class FederationHandler(BaseHandler): # event_ids. states = dict(zip(event_ids, [s.state for s in states])) - state_map = yield self.store.get_events( + state_map = await self.store.get_events( [e_id for ids in itervalues(states) for e_id in itervalues(ids)], get_prev_content=False, ) @@ -1050,7 +1044,7 @@ class FederationHandler(BaseHandler): for e_id, _ in sorted_extremeties_tuple: likely_domains = get_domains_from_state(states[e_id]) - success = yield try_backfill( + success = await try_backfill( [dom for dom, _ in likely_domains if dom not in tried_domains] ) if success: @@ -1060,8 +1054,7 @@ class FederationHandler(BaseHandler): return False - @defer.inlineCallbacks - def _get_events_and_persist( + async def _get_events_and_persist( self, destination: str, room_id: str, events: Iterable[str] ): """Fetch the given events from a server, and persist them as outliers. @@ -1069,7 +1062,7 @@ class FederationHandler(BaseHandler): Logs a warning if we can't find the given event. 
""" - room_version = yield self.store.get_room_version(room_id) + room_version = await self.store.get_room_version(room_id) event_infos = [] @@ -1105,9 +1098,9 @@ class FederationHandler(BaseHandler): e, ) - yield concurrently_execute(get_event, events, 5) + await concurrently_execute(get_event, events, 5) - yield self._handle_new_events( + await self._handle_new_events( destination, event_infos, ) @@ -1250,7 +1243,7 @@ class FederationHandler(BaseHandler): # Check whether this room is the result of an upgrade of a room we already know # about. If so, migrate over user information predecessor = yield self.store.get_room_predecessor(room_id) - if not predecessor: + if not predecessor or not isinstance(predecessor.get("room_id"), str): return old_room_id = predecessor["room_id"] logger.debug( @@ -1278,8 +1271,7 @@ class FederationHandler(BaseHandler): return True - @defer.inlineCallbacks - def _handle_queued_pdus(self, room_queue): + async def _handle_queued_pdus(self, room_queue): """Process PDUs which got queued up while we were busy send_joining. Args: @@ -1295,7 +1287,7 @@ class FederationHandler(BaseHandler): p.room_id, ) with nested_logging_context(p.event_id): - yield self.on_receive_pdu(origin, p, sent_to_us_directly=True) + await self.on_receive_pdu(origin, p, sent_to_us_directly=True) except Exception as e: logger.warning( "Error handling queued PDU %s from %s: %s", p.event_id, origin, e @@ -1452,8 +1444,15 @@ class FederationHandler(BaseHandler): if self.hs.config.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") + is_published = yield self.store.is_room_published(event.room_id) + if not self.spam_checker.user_may_invite( - event.sender, event.state_key, event.room_id + event.sender, + event.state_key, + None, + room_id=event.room_id, + new_room=False, + published_room=is_published, ): raise SynapseError( 403, "This user is not permitted to send invites to this server/user" @@ -1493,7 +1492,7 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content): origin, event, event_format_version = yield self._make_and_verify_event( - target_hosts, room_id, user_id, "leave", content=content, + target_hosts, room_id, user_id, "leave", content=content ) # Mark as outlier as we don't have any state for this event; we're not # even in the room. @@ -2854,7 +2853,7 @@ class FederationHandler(BaseHandler): room_id=room_id, user_id=user.to_string(), change="joined" ) else: - return user_joined_room(self.distributor, user, room_id) + return defer.succeed(user_joined_room(self.distributor, user, room_id)) @defer.inlineCallbacks def get_room_complexity(self, remote_room_hosts, room_id): diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 000fbf090f..517e045e5b 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py
@@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018, 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ from synapse.api.errors import ( CodeMessageException, Codes, HttpResponseException, + ProxiedRequestError, SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour @@ -51,14 +52,21 @@ class IdentityHandler(BaseHandler): def __init__(self, hs): super(IdentityHandler, self).__init__(hs) - self.http_client = SimpleHttpClient(hs) + self.hs = hs + self.http_client = hs.get_simple_http_client() # We create a blacklisting instance of SimpleHttpClient for contacting identity # servers specified by clients self.blacklisting_http_client = SimpleHttpClient( hs, ip_blacklist=hs.config.federation_ip_range_blacklist ) self.federation_http_client = hs.get_http_client() - self.hs = hs + + self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers) + self.trust_any_id_server_just_for_testing_do_not_use = ( + hs.config.use_insecure_ssl_client_just_for_testing_do_not_use + ) + self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls + self._enable_lookup = hs.config.enable_3pid_lookup @defer.inlineCallbacks def threepid_from_creds(self, id_server, creds): @@ -92,7 +100,15 @@ class IdentityHandler(BaseHandler): query_params = {"sid": session_id, "client_secret": client_secret} - url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid" + # if we have a rewrite rule set for the identity server, + # apply it now. + if id_server in self.rewrite_identity_server_urls: + id_server = self.rewrite_identity_server_urls[id_server] + + url = "https://%s%s" % ( + id_server, + "/_matrix/identity/api/v1/3pid/getValidated3pid", + ) try: data = yield self.http_client.get_json(url, query_params) @@ -147,14 +163,24 @@ class IdentityHandler(BaseHandler): if id_access_token is None: use_v2 = False + # if we have a rewrite rule set for the identity server, + # apply it now, but only for sending the request (not + # storing in the database). + if id_server in self.rewrite_identity_server_urls: + id_server_host = self.rewrite_identity_server_urls[id_server] + else: + id_server_host = id_server + # Decide which API endpoint URLs to use headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} if use_v2: - bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) - headers["Authorization"] = create_id_access_token_header(id_access_token) + bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server_host,) + headers["Authorization"] = create_id_access_token_header( + id_access_token + ) else: - bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) + bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server_host,) try: # Use the blacklisting http client as this call is only to identity servers @@ -261,6 +287,16 @@ class IdentityHandler(BaseHandler): ) headers = {b"Authorization": auth_headers} + # if we have a rewrite rule set for the identity server, + # apply it now. + # + # Note that destination_is has to be the real id_server, not + # the server we connect to. 
+ if id_server in self.rewrite_identity_server_urls: + id_server = self.rewrite_identity_server_urls[id_server] + + url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) + try: # Use the blacklisting http client as this call is only to identity servers # provided by a client @@ -398,6 +434,12 @@ class IdentityHandler(BaseHandler): "client_secret": client_secret, "send_attempt": send_attempt, } + + # if we have a rewrite rule set for the identity server, + # apply it now. + if id_server in self.rewrite_identity_server_urls: + id_server = self.rewrite_identity_server_urls[id_server] + if next_link: params["next_link"] = next_link @@ -464,6 +506,10 @@ class IdentityHandler(BaseHandler): "details and update your config file." ) + # if we have a rewrite rule set for the identity server, + # apply it now. + if id_server in self.rewrite_identity_server_urls: + id_server = self.rewrite_identity_server_urls[id_server] try: data = yield self.http_client.post_json_get_json( id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken", @@ -564,6 +610,89 @@ class IdentityHandler(BaseHandler): logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) raise SynapseError(400, "Error contacting the identity server") + # TODO: The following two methods are used for proxying IS requests using + # the CS API. They should be consolidated with those in RoomMemberHandler + # https://github.com/matrix-org/synapse-dinsic/issues/25 + + @defer.inlineCallbacks + def proxy_lookup_3pid(self, id_server, medium, address): + """Looks up a 3pid in the passed identity server. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + medium (str): The type of the third party identifier (e.g. "email"). + address (str): The third party identifier (e.g. "foo@example.com"). + + Returns: + Deferred[dict]: The result of the lookup. See + https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup + for details + """ + if not self._enable_lookup: + raise AuthError( + 403, "Looking up third-party identifiers is denied from this server" + ) + + target = self.rewrite_identity_server_urls.get(id_server, id_server) + + try: + data = yield self.http_client.get_json( + "https://%s/_matrix/identity/api/v1/lookup" % (target,), + {"medium": medium, "address": address}, + ) + + if "mxid" in data: + if "signatures" not in data: + raise AuthError(401, "No signatures on 3pid binding") + yield self._verify_any_signature(data, id_server) + + except HttpResponseException as e: + logger.info("Proxied lookup failed: %r", e) + raise e.to_synapse_error() + except IOError as e: + logger.info("Failed to contact %r: %s", id_server, e) + raise ProxiedRequestError(503, "Failed to contact identity server") + + defer.returnValue(data) + + @defer.inlineCallbacks + def proxy_bulk_lookup_3pid(self, id_server, threepids): + """Looks up given 3pids in the passed identity server. + + Args: + id_server (str): The server name (including port, if required) + of the identity server to use. + threepids ([[str, str]]): The third party identifiers to look up, as + a list of two-item lists ([medium, address]). + + Returns: + Deferred[dict]: The result of the lookup. 
See + https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup + for details + """ + if not self._enable_lookup: + raise AuthError( + 403, "Looking up third-party identifiers is denied from this server" + ) + + target = self.rewrite_identity_server_urls.get(id_server, id_server) + + try: + data = yield self.http_client.post_json_get_json( + "https://%s/_matrix/identity/api/v1/bulk_lookup" % (target,), + {"threepids": threepids}, + ) + + except HttpResponseException as e: + logger.info("Proxied lookup failed: %r", e) + raise e.to_synapse_error() + except IOError as e: + logger.info("Failed to contact %r: %s", id_server, e) + raise ProxiedRequestError(503, "Failed to contact identity server") + + defer.returnValue(data) + @defer.inlineCallbacks def lookup_3pid(self, id_server, medium, address, id_access_token=None): """Looks up a 3pid in the passed identity server. @@ -579,6 +708,9 @@ class IdentityHandler(BaseHandler): Returns: str|None: the matrix ID of the 3pid, or None if it is not recognized. """ + # Rewrite id_server URL if necessary + id_server = self._get_id_server_target(id_server) + if id_access_token is not None: try: results = yield self._lookup_3pid_v2( @@ -616,7 +748,7 @@ class IdentityHandler(BaseHandler): str: the matrix ID of the 3pid, or None if it is not recognized. """ try: - data = yield self.blacklisting_http_client.get_json( + data = yield self.http_client.get_json( "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server), {"medium": medium, "address": address}, ) @@ -649,7 +781,7 @@ class IdentityHandler(BaseHandler): """ # Check what hashing details are supported by this identity server try: - hash_details = yield self.blacklisting_http_client.get_json( + hash_details = yield self.http_client.get_json( "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), {"access_token": id_access_token}, ) @@ -667,7 +799,7 @@ class IdentityHandler(BaseHandler): 400, "Non-dict object from %s%s during v2 hash_details request: %s" % (id_server_scheme, id_server, hash_details), - ) + ) # Extract information from hash_details supported_lookup_algorithms = hash_details.get("algorithms") @@ -682,7 +814,7 @@ class IdentityHandler(BaseHandler): 400, "Invalid hash details received from identity server %s%s: %s" % (id_server_scheme, id_server, hash_details), - ) + ) # Check if any of the supported lookup algorithms are present if LookupAlgorithm.SHA256 in supported_lookup_algorithms: @@ -716,7 +848,7 @@ class IdentityHandler(BaseHandler): headers = {"Authorization": create_id_access_token_header(id_access_token)} try: - lookup_results = yield self.blacklisting_http_client.post_json_get_json( + lookup_results = yield self.http_client.post_json_get_json( "%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server), { "addresses": [lookup_value], @@ -724,7 +856,7 @@ class IdentityHandler(BaseHandler): "pepper": lookup_pepper, }, headers=headers, - ) + ) except TimeoutError: raise SynapseError(500, "Timed out contacting identity server") except Exception as e: @@ -748,14 +880,15 @@ class IdentityHandler(BaseHandler): def _verify_any_signature(self, data, server_hostname): if server_hostname not in data["signatures"]: raise AuthError(401, "No signature from server %s" % (server_hostname,)) + for key_name, signature in data["signatures"][server_hostname].items(): - try: - key_data = yield self.blacklisting_http_client.get_json( - "%s%s/_matrix/identity/api/v1/pubkey/%s" - % (id_server_scheme, server_hostname, key_name) - ) - except 
TimeoutError: - raise SynapseError(500, "Timed out contacting identity server") + target = self.rewrite_identity_server_urls.get( + server_hostname, server_hostname + ) + + key_data = yield self.http_client.get_json( + "https://%s/_matrix/identity/api/v1/pubkey/%s" % (target, key_name) + ) if "public_key" not in key_data: raise AuthError( 401, "No public key named %s from %s" % (key_name, server_hostname) @@ -769,6 +902,23 @@ class IdentityHandler(BaseHandler): ) return + raise AuthError(401, "No signature from server %s" % (server_hostname,)) + + def _get_id_server_target(self, id_server): + """Looks up an id_server's actual http endpoint + + Args: + id_server (str): the server name to lookup. + + Returns: + the http endpoint to connect to. + """ + if id_server in self.rewrite_identity_server_urls: + return self.rewrite_identity_server_urls[id_server] + + return id_server + + @defer.inlineCallbacks def ask_id_server_for_third_party_invite( self, @@ -829,6 +979,9 @@ class IdentityHandler(BaseHandler): "sender_avatar_url": inviter_avatar_url, } + # Rewrite the identity server URL if necessary + id_server = self._get_id_server_target(id_server) + # Add the identity service access token to the JSON body and use the v2 # Identity Service endpoints if id_access_token is present data = None diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
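All of the identity-handler changes above hang off one mechanism: a configured rewrite map consulted before each outbound request, while the original server name is still used for signature verification and database writes. A compact sketch (hypothetical hostnames):

rewrite_identity_server_urls = {"id.example.com": "id.internal.example.net"}

def get_id_server_target(id_server):
    # Mirrors _get_id_server_target above: rewritten if configured,
    # otherwise unchanged.
    return rewrite_identity_server_urls.get(id_server, id_server)

def lookup_url(id_server):
    # Requests go to the rewritten host...
    return "https://%s/_matrix/identity/api/v1/lookup" % (
        get_id_server_target(id_server),
    )

assert lookup_url("id.example.com").startswith("https://id.internal.example.net")
assert lookup_url("other.example.org").startswith("https://other.example.org")
# ...but signatures are still checked against "id.example.com" itself.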
index 81dce96f4b..44ec3e66ae 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py
@@ -26,7 +26,7 @@ from synapse.streams.config import PaginationConfig from synapse.types import StreamToken, UserID from synapse.util import unwrapFirstError from synapse.util.async_helpers import concurrently_execute -from synapse.util.caches.snapshot_cache import SnapshotCache +from synapse.util.caches.response_cache import ResponseCache from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -41,7 +41,7 @@ class InitialSyncHandler(BaseHandler): self.state = hs.get_state_handler() self.clock = hs.get_clock() self.validator = EventValidator() - self.snapshot_cache = SnapshotCache() + self.snapshot_cache = ResponseCache(hs, "initial_sync_cache") self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state @@ -79,21 +79,17 @@ class InitialSyncHandler(BaseHandler): as_client_event, include_archived, ) - now_ms = self.clock.time_msec() - result = self.snapshot_cache.get(now_ms, key) - if result is not None: - return result - return self.snapshot_cache.set( - now_ms, + return self.snapshot_cache.wrap( key, - self._snapshot_all_rooms( - user_id, pagin_config, as_client_event, include_archived - ), + self._snapshot_all_rooms, + user_id, + pagin_config, + as_client_event, + include_archived, ) - @defer.inlineCallbacks - def _snapshot_all_rooms( + async def _snapshot_all_rooms( self, user_id=None, pagin_config=None, @@ -105,7 +101,7 @@ class InitialSyncHandler(BaseHandler): if include_archived: memberships.append(Membership.LEAVE) - room_list = yield self.store.get_rooms_for_user_where_membership_is( + room_list = await self.store.get_rooms_for_user_where_membership_is( user_id=user_id, membership_list=memberships ) @@ -113,33 +109,32 @@ class InitialSyncHandler(BaseHandler): rooms_ret = [] - now_token = yield self.hs.get_event_sources().get_current_token() + now_token = await self.hs.get_event_sources().get_current_token() presence_stream = self.hs.get_event_sources().sources["presence"] pagination_config = PaginationConfig(from_token=now_token) - presence, _ = yield presence_stream.get_pagination_rows( + presence, _ = await presence_stream.get_pagination_rows( user, pagination_config.get_source_config("presence"), None ) receipt_stream = self.hs.get_event_sources().sources["receipt"] - receipt, _ = yield receipt_stream.get_pagination_rows( + receipt, _ = await receipt_stream.get_pagination_rows( user, pagination_config.get_source_config("receipt"), None ) - tags_by_room = yield self.store.get_tags_for_user(user_id) + tags_by_room = await self.store.get_tags_for_user(user_id) - account_data, account_data_by_room = yield self.store.get_account_data_for_user( + account_data, account_data_by_room = await self.store.get_account_data_for_user( user_id ) - public_room_ids = yield self.store.get_public_room_ids() + public_room_ids = await self.store.get_public_room_ids() limit = pagin_config.limit if limit is None: limit = 10 - @defer.inlineCallbacks - def handle_room(event): + async def handle_room(event): d = { "room_id": event.room_id, "membership": event.membership, @@ -152,8 +147,8 @@ class InitialSyncHandler(BaseHandler): time_now = self.clock.time_msec() d["inviter"] = event.sender - invite_event = yield self.store.get_event(event.event_id) - d["invite"] = yield self._event_serializer.serialize_event( + invite_event = await self.store.get_event(event.event_id) + d["invite"] = await self._event_serializer.serialize_event( invite_event, time_now, as_client_event ) @@ -177,7 +172,7 @@ class 
InitialSyncHandler(BaseHandler): lambda states: states[event.event_id] ) - (messages, token), current_state = yield make_deferred_yieldable( + (messages, token), current_state = await make_deferred_yieldable( defer.gatherResults( [ run_in_background( @@ -191,7 +186,7 @@ class InitialSyncHandler(BaseHandler): ) ).addErrback(unwrapFirstError) - messages = yield filter_events_for_client( + messages = await filter_events_for_client( self.storage, user_id, messages ) @@ -201,7 +196,7 @@ class InitialSyncHandler(BaseHandler): d["messages"] = { "chunk": ( - yield self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( messages, time_now=time_now, as_client_event=as_client_event ) ), @@ -209,7 +204,7 @@ class InitialSyncHandler(BaseHandler): "end": end_token.to_string(), } - d["state"] = yield self._event_serializer.serialize_events( + d["state"] = await self._event_serializer.serialize_events( current_state.values(), time_now=time_now, as_client_event=as_client_event, @@ -232,7 +227,7 @@ class InitialSyncHandler(BaseHandler): except Exception: logger.exception("Failed to get snapshot") - yield concurrently_execute(handle_room, room_list, 10) + await concurrently_execute(handle_room, room_list, 10) account_data_events = [] for account_data_type, content in account_data.items(): @@ -256,8 +251,7 @@ class InitialSyncHandler(BaseHandler): return ret - @defer.inlineCallbacks - def room_initial_sync(self, requester, room_id, pagin_config=None): + async def room_initial_sync(self, requester, room_id, pagin_config=None): """Capture a snapshot of a room. If the user is currently a member of the room, this will be what is currently in the room. If the user left the room, this will be what was in the room when they left. @@ -274,32 +268,32 @@ A JSON serialisable dict with the snapshot of the room. 
""" - blocked = yield self.store.is_room_blocked(room_id) + blocked = await self.store.is_room_blocked(room_id) if blocked: raise SynapseError(403, "This room has been blocked on this server") user_id = requester.user.to_string() - membership, member_event_id = yield self._check_in_room_or_world_readable( + membership, member_event_id = await self._check_in_room_or_world_readable( room_id, user_id ) is_peeking = member_event_id is None if membership == Membership.JOIN: - result = yield self._room_initial_sync_joined( + result = await self._room_initial_sync_joined( user_id, room_id, pagin_config, membership, is_peeking ) elif membership == Membership.LEAVE: - result = yield self._room_initial_sync_parted( + result = await self._room_initial_sync_parted( user_id, room_id, pagin_config, membership, member_event_id, is_peeking ) account_data_events = [] - tags = yield self.store.get_tags_for_room(user_id, room_id) + tags = await self.store.get_tags_for_room(user_id, room_id) if tags: account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) - account_data = yield self.store.get_account_data_for_room(user_id, room_id) + account_data = await self.store.get_account_data_for_room(user_id, room_id) for account_data_type, content in account_data.items(): account_data_events.append({"type": account_data_type, "content": content}) @@ -307,11 +301,10 @@ class InitialSyncHandler(BaseHandler): return result - @defer.inlineCallbacks - def _room_initial_sync_parted( + async def _room_initial_sync_parted( self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking ): - room_state = yield self.state_store.get_state_for_events([member_event_id]) + room_state = await self.state_store.get_state_for_events([member_event_id]) room_state = room_state[member_event_id] @@ -319,13 +312,13 @@ class InitialSyncHandler(BaseHandler): if limit is None: limit = 10 - stream_token = yield self.store.get_stream_token_for_event(member_event_id) + stream_token = await self.store.get_stream_token_for_event(member_event_id) - messages, token = yield self.store.get_recent_events_for_room( + messages, token = await self.store.get_recent_events_for_room( room_id, limit=limit, end_token=stream_token ) - messages = yield filter_events_for_client( + messages = await filter_events_for_client( self.storage, user_id, messages, is_peeking=is_peeking ) @@ -339,13 +332,13 @@ class InitialSyncHandler(BaseHandler): "room_id": room_id, "messages": { "chunk": ( - yield self._event_serializer.serialize_events(messages, time_now) + await self._event_serializer.serialize_events(messages, time_now) ), "start": start_token.to_string(), "end": end_token.to_string(), }, "state": ( - yield self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( room_state.values(), time_now ) ), @@ -353,19 +346,18 @@ class InitialSyncHandler(BaseHandler): "receipts": [], } - @defer.inlineCallbacks - def _room_initial_sync_joined( + async def _room_initial_sync_joined( self, user_id, room_id, pagin_config, membership, is_peeking ): - current_state = yield self.state.get_current_state(room_id=room_id) + current_state = await self.state.get_current_state(room_id=room_id) # TODO: These concurrently time_now = self.clock.time_msec() - state = yield self._event_serializer.serialize_events( + state = await self._event_serializer.serialize_events( current_state.values(), time_now ) - now_token = yield self.hs.get_event_sources().get_current_token() + now_token = await 
self.hs.get_event_sources().get_current_token() limit = pagin_config.limit if pagin_config else None if limit is None: @@ -380,28 +372,26 @@ class InitialSyncHandler(BaseHandler): presence_handler = self.hs.get_presence_handler() - @defer.inlineCallbacks - def get_presence(): + async def get_presence(): # If presence is disabled, return an empty list if not self.hs.config.use_presence: return [] - states = yield presence_handler.get_states( + states = await presence_handler.get_states( [m.user_id for m in room_members], as_event=True ) return states - @defer.inlineCallbacks - def get_receipts(): - receipts = yield self.store.get_linearized_receipts_for_room( + async def get_receipts(): + receipts = await self.store.get_linearized_receipts_for_room( room_id, to_key=now_token.receipt_key ) if not receipts: receipts = [] return receipts - presence, receipts, (messages, token) = yield make_deferred_yieldable( + presence, receipts, (messages, token) = await make_deferred_yieldable( defer.gatherResults( [ run_in_background(get_presence), @@ -417,7 +407,7 @@ class InitialSyncHandler(BaseHandler): ).addErrback(unwrapFirstError) ) - messages = yield filter_events_for_client( + messages = await filter_events_for_client( self.storage, user_id, messages, is_peeking=is_peeking ) @@ -430,7 +420,7 @@ class InitialSyncHandler(BaseHandler): "room_id": room_id, "messages": { "chunk": ( - yield self._event_serializer.serialize_events(messages, time_now) + await self._event_serializer.serialize_events(messages, time_now) ), "start": start_token.to_string(), "end": end_token.to_string(), @@ -444,18 +434,17 @@ class InitialSyncHandler(BaseHandler): return ret - @defer.inlineCallbacks - def _check_in_room_or_world_readable(self, room_id, user_id): + async def _check_in_room_or_world_readable(self, room_id, user_id): try: # check_user_was_in_room will return the most recent membership # event for the user if: # * The user is a non-guest user, and was ever in the room # * The user is a guest user, and has joined the room # else it will throw. - member_event = yield self.auth.check_user_was_in_room(room_id, user_id) + member_event = await self.auth.check_user_was_in_room(room_id, user_id) return member_event.membership, member_event.event_id except AuthError: - visibility = yield self.state_handler.get_current_state( + visibility = await self.state_handler.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" ) if ( diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
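The initial_sync.py hunks above swap the bespoke SnapshotCache for ResponseCache.wrap(), which deduplicates concurrent work: the first request with a given key starts the computation, and every request arriving with the same key while it is in flight shares the result. A minimal asyncio sketch of that pattern (illustrative only; MiniResponseCache is not Synapse's implementation and omits its timeout and logcontext handling):

import asyncio


class MiniResponseCache:
    """Illustrative stand-in for ResponseCache.wrap(): dedupe in-flight calls by key."""

    def __init__(self):
        self._pending = {}  # key -> asyncio.Task of the in-flight computation

    async def wrap(self, key, callback, *args):
        task = self._pending.get(key)
        if task is None:
            # First caller: start the computation and remember it.
            task = asyncio.ensure_future(callback(*args))
            self._pending[key] = task
            task.add_done_callback(lambda _: self._pending.pop(key, None))
        # Every caller with the same key awaits the same task.
        return await asyncio.shield(task)


async def snapshot(user_id):
    await asyncio.sleep(0.1)  # stand-in for the expensive initial-sync query
    return {"user_id": user_id, "rooms": []}


async def main():
    cache = MiniResponseCache()
    key = ("@alice:example.com", None, True, False)  # a tuple key, as above
    a, b = await asyncio.gather(
        cache.wrap(key, snapshot, "@alice:example.com"),
        cache.wrap(key, snapshot, "@alice:example.com"),
    )
    assert a is b  # both callers got the result of a single computation


asyncio.run(main())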
index 54fa216d83..bf9add7fe2 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py
@@ -46,6 +46,7 @@ from synapse.events.validator import EventValidator from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -875,7 +876,7 @@ class EventCreationHandler(object): if event.type == EventTypes.Redaction: original_event = yield self.store.get_event( event.redacts, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, allow_rejected=False, allow_none=True, @@ -952,7 +953,7 @@ class EventCreationHandler(object): if event.type == EventTypes.Redaction: original_event = yield self.store.get_event( event.redacts, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, allow_rejected=False, allow_none=True, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
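The message.py change above replaces the boolean check_redacted=False with an explicit redact_behaviour argument: a redaction handler needs the original event body to validate what is being redacted, so it asks for AS_IS. A sketch of the three-state idea using a plain Enum (Synapse's actual EventRedactBehaviour lives in events_worker.py; only AS_IS appears in the hunks above, so treat the REDACT and BLOCK members here as assumptions):

from enum import Enum, auto


class RedactBehaviour(Enum):
    """How a get_event() call should treat an event that has been redacted."""

    AS_IS = auto()   # return the original, unredacted content (used above)
    REDACT = auto()  # return the content with the redaction applied
    BLOCK = auto()   # do not return the event at all


def fetch_event_content(event, behaviour=RedactBehaviour.REDACT):
    # `event` is a dict with "content", "pruned_content" and "redacted" keys
    # in this sketch; the real storage layer is considerably more involved.
    if not event["redacted"] or behaviour is RedactBehaviour.AS_IS:
        return event["content"]
    if behaviour is RedactBehaviour.REDACT:
        return event["pruned_content"]
    return None  # BLOCK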
index 8514ddc600..00a6afc963 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py
@@ -280,8 +280,7 @@ class PaginationHandler(object): await self.storage.purge_events.purge_room(room_id) - @defer.inlineCallbacks - def get_messages( + async def get_messages( self, requester, room_id=None, @@ -307,7 +306,7 @@ class PaginationHandler(object): room_token = pagin_config.from_token.room_key else: pagin_config.from_token = ( - yield self.hs.get_event_sources().get_current_token_for_pagination() + await self.hs.get_event_sources().get_current_token_for_pagination() ) room_token = pagin_config.from_token.room_key @@ -319,11 +318,11 @@ class PaginationHandler(object): source_config = pagin_config.get_source_config("room") - with (yield self.pagination_lock.read(room_id)): + with (await self.pagination_lock.read(room_id)): ( membership, member_event_id, - ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id) + ) = await self.auth.check_in_room_or_world_readable(room_id, user_id) if source_config.direction == "b": # if we're going backwards, we might need to backfill. This @@ -331,7 +330,7 @@ class PaginationHandler(object): if room_token.topological: max_topo = room_token.topological else: - max_topo = yield self.store.get_max_topological_token( + max_topo = await self.store.get_max_topological_token( room_id, room_token.stream ) @@ -339,18 +338,18 @@ class PaginationHandler(object): # If they have left the room then clamp the token to be before # they left the room, to save the effort of loading from the # database. - leave_token = yield self.store.get_topological_token_for_event( + leave_token = await self.store.get_topological_token_for_event( member_event_id ) leave_token = RoomStreamToken.parse(leave_token) if leave_token.topological < max_topo: source_config.from_key = str(leave_token) - yield self.hs.get_handlers().federation_handler.maybe_backfill( + await self.hs.get_handlers().federation_handler.maybe_backfill( room_id, max_topo ) - events, next_key = yield self.store.paginate_room_events( + events, next_key = await self.store.paginate_room_events( room_id=room_id, from_key=source_config.from_key, to_key=source_config.to_key, @@ -365,7 +364,7 @@ class PaginationHandler(object): if event_filter: events = event_filter.filter(events) - events = yield filter_events_for_client( + events = await filter_events_for_client( self.storage, user_id, events, is_peeking=(member_event_id is None) ) @@ -385,19 +384,19 @@ class PaginationHandler(object): (EventTypes.Member, event.sender) for event in events ) - state_ids = yield self.state_store.get_state_ids_for_event( + state_ids = await self.state_store.get_state_ids_for_event( events[0].event_id, state_filter=state_filter ) if state_ids: - state = yield self.store.get_events(list(state_ids.values())) + state = await self.store.get_events(list(state_ids.values())) state = state.values() time_now = self.clock.time_msec() chunk = { "chunk": ( - yield self._event_serializer.serialize_events( + await self._event_serializer.serialize_events( events, time_now, as_client_event=as_client_event ) ), @@ -406,7 +405,7 @@ class PaginationHandler(object): } if state: - chunk["state"] = yield self._event_serializer.serialize_events( + chunk["state"] = await self._event_serializer.serialize_events( state, time_now, as_client_event=as_client_event ) diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py new file mode 100644
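The pagination.py hunks above are another mechanical @defer.inlineCallbacks → async def conversion. The translation works because Deferreds are awaitable in modern Twisted (16.4+), so `yield some_deferred` inside a decorated generator becomes `await some_deferred` inside a native coroutine. A side-by-side sketch (`store.fetch` is a placeholder, not a real Synapse method):

from twisted.internet import defer


@defer.inlineCallbacks
def get_thing_old(store):
    # Old style: a generator that yields Deferreds; inlineCallbacks drives it.
    thing = yield store.fetch()
    return thing


async def get_thing_new(store):
    # New style: a native coroutine. Awaiting a Deferred works because
    # Deferred implements __await__.
    thing = await store.fetch()
    return thing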
index 0000000000..d06b110269 --- /dev/null +++ b/synapse/handlers/password_policy.py
@@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re + +from synapse.api.errors import Codes, PasswordRefusedError + +logger = logging.getLogger(__name__) + + +class PasswordPolicyHandler(object): + def __init__(self, hs): + self.policy = hs.config.password_policy + self.enabled = hs.config.password_policy_enabled + + # Regexps for the spec'd policy parameters. + self.regexp_digit = re.compile("[0-9]") + self.regexp_symbol = re.compile("[^a-zA-Z0-9]") + self.regexp_uppercase = re.compile("[A-Z]") + self.regexp_lowercase = re.compile("[a-z]") + + def validate_password(self, password): + """Checks whether a given password complies with the server's policy. + + Args: + password (str): The password to check against the server's policy. + + Raises: + PasswordRefusedError: The password doesn't comply with the server's policy. + """ + + if not self.enabled: + return + + minimum_accepted_length = self.policy.get("minimum_length", 0) + if len(password) < minimum_accepted_length: + raise PasswordRefusedError( + msg=( + "The password must be at least %d characters long" + % minimum_accepted_length + ), + errcode=Codes.PASSWORD_TOO_SHORT, + ) + + if ( + self.policy.get("require_digit", False) + and self.regexp_digit.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one digit", + errcode=Codes.PASSWORD_NO_DIGIT, + ) + + if ( + self.policy.get("require_symbol", False) + and self.regexp_symbol.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one symbol", + errcode=Codes.PASSWORD_NO_SYMBOL, + ) + + if ( + self.policy.get("require_uppercase", False) + and self.regexp_uppercase.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one uppercase letter", + errcode=Codes.PASSWORD_NO_UPPERCASE, + ) + + if ( + self.policy.get("require_lowercase", False) + and self.regexp_lowercase.search(password) is None + ): + raise PasswordRefusedError( + msg="The password must include at least one lowercase letter", + errcode=Codes.PASSWORD_NO_LOWERCASE, + ) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
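The new PasswordPolicyHandler checks each rule independently and raises on the first failure with a rule-specific error code. A standalone sketch of the same checks that reports every failed rule at once, handy for testing a policy dict of the shape the handler reads (the policy values below are made up):

import re

policy = {
    "minimum_length": 10,
    "require_digit": True,
    "require_symbol": True,
    "require_uppercase": True,
    "require_lowercase": True,
}


def check(password: str, policy: dict) -> list:
    """Re-implements the checks above, returning the list of failed rules."""
    failures = []
    if len(password) < policy.get("minimum_length", 0):
        failures.append("too_short")
    if policy.get("require_digit") and not re.search("[0-9]", password):
        failures.append("no_digit")
    if policy.get("require_symbol") and not re.search("[^a-zA-Z0-9]", password):
        failures.append("no_symbol")
    if policy.get("require_uppercase") and not re.search("[A-Z]", password):
        failures.append("no_uppercase")
    if policy.get("require_lowercase") and not re.search("[a-z]", password):
        failures.append("no_lowercase")
    return failures


assert check("Tr0ub4dor&3", policy) == []
assert "no_digit" in check("Password!!", policy)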
index 1e5a4613c9..e8892e1220 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,8 +17,11 @@ import logging from six import raise_from +from six.moves import range -from twisted.internet import defer +from signedjson.sign import sign_json + +from twisted.internet import defer, reactor from synapse.api.errors import ( AuthError, @@ -27,6 +31,7 @@ from synapse.api.errors import ( StoreError, SynapseError, ) +from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import UserID, get_domain_from_id @@ -46,6 +51,8 @@ class BaseProfileHandler(BaseHandler): subclass MasterProfileHandler """ + PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000 + def __init__(self, hs): super(BaseProfileHandler, self).__init__(hs) @@ -56,6 +63,87 @@ class BaseProfileHandler(BaseHandler): self.user_directory_handler = hs.get_user_directory_handler() + self.http_client = hs.get_simple_http_client() + + self.max_avatar_size = hs.config.max_avatar_size + self.allowed_avatar_mimetypes = hs.config.allowed_avatar_mimetypes + + if hs.config.worker_app is None: + self.clock.looping_call( + self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS + ) + + if len(self.hs.config.replicate_user_profiles_to) > 0: + reactor.callWhenRunning(self._assign_profile_replication_batches) + reactor.callWhenRunning(self._replicate_profiles) + # Add a looping call to replicate_profiles: this handles retries + # if the replication is unsuccessful when the user updated their + # profile. + self.clock.looping_call( + self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL + ) + + @defer.inlineCallbacks + def _assign_profile_replication_batches(self): + """If no profile replication has been done yet, allocate replication batch + numbers to each profile to start the replication process. + """ + logger.info("Assigning profile batch numbers...") + total = 0 + while True: + assigned = yield self.store.assign_profile_batch() + total += assigned + if assigned == 0: + break + logger.info("Assigned %d profile batch numbers", total) + + @defer.inlineCallbacks + def _replicate_profiles(self): + """If any profile data has been updated and not pushed to the replication targets, + replicate it. 
+ """ + host_batches = yield self.store.get_replication_hosts() + latest_batch = yield self.store.get_latest_profile_replication_batch_number() + if latest_batch is None: + latest_batch = -1 + for repl_host in self.hs.config.replicate_user_profiles_to: + if repl_host not in host_batches: + host_batches[repl_host] = -1 + try: + for i in range(host_batches[repl_host] + 1, latest_batch + 1): + yield self._replicate_host_profile_batch(repl_host, i) + except Exception: + logger.exception( + "Exception while replicating to %s: aborting for now", repl_host + ) + + @defer.inlineCallbacks + def _replicate_host_profile_batch(self, host, batchnum): + logger.info("Replicating profile batch %d to %s", batchnum, host) + batch_rows = yield self.store.get_profile_batch(batchnum) + batch = { + UserID(r["user_id"], self.hs.hostname).to_string(): ( + {"display_name": r["displayname"], "avatar_url": r["avatar_url"]} + if r["active"] + else None + ) + for r in batch_rows + } + + url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,) + body = {"batchnum": batchnum, "batch": batch, "origin_server": self.hs.hostname} + signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0]) + try: + yield self.http_client.post_json_get_json(url, signed_body) + yield self.store.update_replication_batch_for_host(host, batchnum) + logger.info("Sucessfully replicated profile batch %d to %s", batchnum, host) + except Exception: + # This will get retried when the looping call next comes around + logger.exception( + "Failed to replicate profile batch %d to %s", batchnum, host + ) + raise + @defer.inlineCallbacks def get_profile(self, user_id): target_user = UserID.from_string(user_id) @@ -154,9 +242,16 @@ class BaseProfileHandler(BaseHandler): if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") - if not by_admin and target_user != requester.user: + if not by_admin and requester and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") + if not by_admin and self.hs.config.disable_set_displayname: + profile = yield self.store.get_profileinfo(target_user.localpart) + if profile.display_name: + raise SynapseError( + 400, "Changing displayname is disabled on this server" + ) + if len(new_displayname) > MAX_DISPLAYNAME_LEN: raise SynapseError( 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,) @@ -165,7 +260,17 @@ class BaseProfileHandler(BaseHandler): if new_displayname == "": new_displayname = None - yield self.store.set_profile_displayname(target_user.localpart, new_displayname) + if len(self.hs.config.replicate_user_profiles_to) > 0: + cur_batchnum = ( + yield self.store.get_latest_profile_replication_batch_number() + ) + new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1 + else: + new_batchnum = None + + yield self.store.set_profile_displayname( + target_user.localpart, new_displayname, new_batchnum + ) if self.hs.config.user_directory_search_all_users: profile = yield self.store.get_profileinfo(target_user.localpart) @@ -173,7 +278,39 @@ class BaseProfileHandler(BaseHandler): target_user.to_string(), profile ) - yield self._update_join_states(requester, target_user) + if requester: + yield self._update_join_states(requester, target_user) + + # start a profile replication push + run_in_background(self._replicate_profiles) + + @defer.inlineCallbacks + def set_active(self, target_user, active, hide): + """ + Sets the 'active' flag on a user profile. 
If set to false, the user + account is considered deactivated or hidden. + + If 'hide' is true, then we interpret active=False as a request to try to + hide the user rather than deactivating it. This means withholding the + profile from replication (and marking it as inactive) rather than clearing + the profile from the HS DB. Note that unlike set_displayname and + set_avatar_url, this does *not* perform authorization checks! This is + because the only place it's used currently is in account deactivation + where we've already done these checks anyway. + """ + if len(self.hs.config.replicate_user_profiles_to) > 0: + cur_batchnum = ( + yield self.store.get_latest_profile_replication_batch_number() + ) + new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1 + else: + new_batchnum = None + yield self.store.set_profile_active( + target_user.localpart, active, hide, new_batchnum + ) + + # start a profile replication push + run_in_background(self._replicate_profiles) @defer.inlineCallbacks def get_avatar_url(self, target_user): @@ -212,12 +349,59 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") + if not by_admin and self.hs.config.disable_set_avatar_url: + profile = yield self.store.get_profileinfo(target_user.localpart) + if profile.avatar_url: + raise SynapseError( + 400, "Changing avatar url is disabled on this server" + ) + + if len(self.hs.config.replicate_user_profiles_to) > 0: + cur_batchnum = ( + yield self.store.get_latest_profile_replication_batch_number() + ) + new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1 + else: + new_batchnum = None + if len(new_avatar_url) > MAX_AVATAR_URL_LEN: raise SynapseError( 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) ) - yield self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url) + # Enforce a max avatar size if one is defined + if self.max_avatar_size or self.allowed_avatar_mimetypes: + media_id = self._validate_and_parse_media_id_from_avatar_url(new_avatar_url) + + # Check that this media exists locally + media_info = yield self.store.get_local_media(media_id) + if not media_info: + raise SynapseError( + 400, "Unknown media id supplied", errcode=Codes.NOT_FOUND + ) + + # Ensure avatar does not exceed max allowed avatar size + media_size = media_info["media_length"] + if self.max_avatar_size and media_size > self.max_avatar_size: + raise SynapseError( + 400, + "Avatars must be less than %s bytes in size" + % (self.max_avatar_size,), + errcode=Codes.TOO_LARGE, + ) + + # Ensure the avatar's file type is allowed + if ( + self.allowed_avatar_mimetypes + and media_info["media_type"] not in self.allowed_avatar_mimetypes + ): + raise SynapseError( + 400, "Avatar file type '%s' not allowed" % media_info["media_type"] + ) + + yield self.store.set_profile_avatar_url( + target_user.localpart, new_avatar_url, new_batchnum + ) if self.hs.config.user_directory_search_all_users: profile = yield self.store.get_profileinfo(target_user.localpart) @@ -227,6 +411,23 @@ class BaseProfileHandler(BaseHandler): yield self._update_join_states(requester, target_user) + # start a profile replication push + run_in_background(self._replicate_profiles) + + def _validate_and_parse_media_id_from_avatar_url(self, mxc): + """Validate and parse a provided avatar url and return the local media id + + Args: + mxc (str): An mxc URL + + Returns: + str: The ID of the media + """ + avatar_pieces = mxc.split("/") + if len(avatar_pieces)
!= 4 or avatar_pieces[0] != "mxc:": + raise SynapseError(400, "Invalid avatar URL '%s' supplied" % mxc) + return avatar_pieces[-1] + @defer.inlineCallbacks def on_profile_query(self, args): user = UserID.from_string(args["user_id"]) @@ -282,7 +483,7 @@ class BaseProfileHandler(BaseHandler): @defer.inlineCallbacks def check_profile_query_allowed(self, target_user, requester=None): """Checks whether a profile query is allowed. If the - 'require_auth_for_profile_requests' config flag is set to True and a + 'limit_profile_requests_to_known_users' config flag is set to True and a 'requester' is provided, the query is only allowed if the two users share a room. @@ -300,7 +501,11 @@ class BaseProfileHandler(BaseHandler): # be None when this function is called outside of a profile query, e.g. # when building a membership event. In this case, we must allow the # lookup. - if not self.hs.config.require_auth_for_profile_requests or not requester: + if not self.hs.config.limit_profile_requests_to_known_users or not requester: + return + + # Always allow the user to query their own profile. + if target_user.to_string() == requester.to_string(): return # Always allow the user to query their own profile. diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
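The _validate_and_parse_media_id_from_avatar_url helper in profile.py above leans on how str.split treats the mxc:// scheme: the double slash yields an empty second piece, so a well-formed URL always splits into exactly four pieces. A self-contained version with the same acceptance rule:

def media_id_from_mxc(mxc: str) -> str:
    """Mirrors the validation above: mxc://<server>/<media_id> -> <media_id>."""
    pieces = mxc.split("/")
    # "mxc://server/media_id".split("/") == ["mxc:", "", "server", "media_id"]
    if len(pieces) != 4 or pieces[0] != "mxc:":
        raise ValueError("Invalid avatar URL %r" % (mxc,))
    return pieces[-1]


assert media_id_from_mxc("mxc://example.com/abcdef123") == "abcdef123"
try:
    media_id_from_mxc("https://example.com/abcdef123")
except ValueError:
    pass  # rejected, as expected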
index 8a7d965feb..099611d79e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py
@@ -55,6 +55,7 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self.http_client = hs.get_simple_http_client() self.identity_handler = self.hs.get_handlers().identity_handler self.ratelimiter = hs.get_registration_ratelimiter() @@ -67,6 +68,8 @@ class RegistrationHandler(BaseHandler): ) self._server_notices_mxid = hs.config.server_notices_mxid + self._show_in_user_directory = self.hs.config.show_users_in_user_directory + if hs.config.worker_app: self._register_client = ReplicationRegisterServlet.make_client(hs) self._register_device_client = RegisterDeviceReplicationServlet.make_client( @@ -209,6 +212,11 @@ class RegistrationHandler(BaseHandler): address=address, ) + if default_display_name: + yield self.profile_handler.set_displayname( + user, None, default_display_name, by_admin=True + ) + if self.hs.config.user_directory_search_all_users: profile = yield self.store.get_profileinfo(localpart) yield self.user_directory_handler.handle_local_profile_change( @@ -239,6 +247,10 @@ class RegistrationHandler(BaseHandler): address=address, ) + yield self.profile_handler.set_displayname( + user, None, default_display_name, by_admin=True + ) + # Successfully registered break except SynapseError: @@ -268,6 +280,14 @@ class RegistrationHandler(BaseHandler): # Bind email to new account yield self._register_email_threepid(user_id, threepid_dict, None) + # Prevent the new user from showing up in the user directory if the server + # mandates it. + if not self._show_in_user_directory: + yield self.store.add_account_data_for_user( + user_id, "im.vector.hide_profile", {"hide_profile": True} + ) + yield self.profile_handler.set_active(user, False, True) + return user_id @defer.inlineCallbacks @@ -334,7 +354,9 @@ class RegistrationHandler(BaseHandler): yield self._auto_join_rooms(user_id) @defer.inlineCallbacks - def appservice_register(self, user_localpart, as_token): + def appservice_register(self, user_localpart, as_token, password, display_name): + # FIXME: this should be factored out and merged with normal register() + user = UserID(user_localpart, self.hs.hostname) user_id = user.to_string() service = self.store.get_app_service_by_token(as_token) @@ -353,12 +375,29 @@ class RegistrationHandler(BaseHandler): user_id, allowed_appservice=service ) + password_hash = "" + if password: + password_hash = yield self.auth_handler().hash(password) + + display_name = display_name or user.localpart + yield self.register_with_store( user_id=user_id, - password_hash="", + password_hash=password_hash, appservice_id=service_id, - create_profile_with_displayname=user.localpart, + create_profile_with_displayname=display_name, ) + + yield self.profile_handler.set_displayname( + user, None, display_name, by_admin=True + ) + + if self.hs.config.user_directory_search_all_users: + profile = yield self.store.get_profileinfo(user_localpart) + yield self.user_directory_handler.handle_local_profile_change( + user_id, profile + ) + return user_id def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None): @@ -386,6 +425,39 @@ class RegistrationHandler(BaseHandler): ) @defer.inlineCallbacks + def shadow_register(self, localpart, display_name, auth_result, params): + """Invokes the current registration on another server, using + shared secret registration, passing in any auth_results from + other registration UI auth flows (e.g. 
validated 3pids) + Useful for setting up shadow/backup accounts on a parallel deployment. + """ + + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.post_json_get_json( + "%s/_matrix/client/r0/register?access_token=%s" % (shadow_hs_url, as_token), + { + # XXX: auth_result is an unspecified extension for shadow registration + "auth_result": auth_result, + # XXX: another unspecified extension for shadow registration to ensure + # that the displayname is correctly set by the master server + "display_name": display_name, + "username": localpart, + "password": params.get("password"), + "bind_email": params.get("bind_email"), + "bind_msisdn": params.get("bind_msisdn"), + "device_id": params.get("device_id"), + "initial_device_display_name": params.get( + "initial_device_display_name" + ), + "inhibit_login": False, + "access_token": as_token, + }, + ) + + @defer.inlineCallbacks + def _generate_user_id(self): if self._next_generated_user_id is None: with (yield self._generate_user_id_linearizer.queue(())): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
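shadow_register above forwards a successful registration to a second homeserver, authenticating with the application-service token in the query string. A rough equivalent using the requests library (the endpoint shape and the unspecced auth_result/display_name fields are taken from the hunk above; the function itself and its parameters are illustrative):

import requests


def shadow_register(shadow_hs_url, as_token, localpart, password, display_name):
    """Sketch: forward a registration to a shadow homeserver."""
    resp = requests.post(
        "%s/_matrix/client/r0/register" % (shadow_hs_url,),
        params={"access_token": as_token},
        json={
            "username": localpart,
            "password": password,
            "display_name": display_name,  # unspecced extension, as above
            "inhibit_login": False,
        },
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()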
index 22768e97ff..e20eadcae2 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py
@@ -54,12 +54,14 @@ class RoomCreationHandler(BaseHandler): "history_visibility": "shared", "original_invitees_have_ops": False, "guest_can_join": True, + "encryption_alg": "m.megolm.v1.aes-sha2", }, RoomCreationPreset.TRUSTED_PRIVATE_CHAT: { "join_rules": JoinRules.INVITE, "history_visibility": "shared", "original_invitees_have_ops": True, "guest_can_join": True, + "encryption_alg": "m.megolm.v1.aes-sha2", }, RoomCreationPreset.PUBLIC_CHAT: { "join_rules": JoinRules.PUBLIC, @@ -303,7 +305,19 @@ class RoomCreationHandler(BaseHandler): """ user_id = requester.user.to_string() - if not self.spam_checker.user_may_create_room(user_id): + if ( + self._server_notices_mxid is not None + and requester.user.to_string() == self._server_notices_mxid + ): + # allow the server notices mxid to create rooms + is_requester_admin = True + + else: + is_requester_admin = yield self.auth.is_server_admin(requester.user) + + if not is_requester_admin and not self.spam_checker.user_may_create_room( + user_id, invite_list=[], third_party_invite_list=[], cloning=True + ): raise SynapseError(403, "You are not permitted to create rooms") creation_content = { @@ -551,8 +565,14 @@ class RoomCreationHandler(BaseHandler): requester, config, is_requester_admin=is_requester_admin ) + invite_list = config.get("invite", []) + invite_3pid_list = config.get("invite_3pid", []) + if not is_requester_admin and not self.spam_checker.user_may_create_room( - user_id + user_id, + invite_list=invite_list, + third_party_invite_list=invite_3pid_list, + cloning=False, ): raise SynapseError(403, "You are not permitted to create rooms") @@ -586,7 +606,6 @@ class RoomCreationHandler(BaseHandler): else: room_alias = None - invite_list = config.get("invite", []) for i in invite_list: try: uid = UserID.from_string(i) @@ -608,8 +627,6 @@ class RoomCreationHandler(BaseHandler): % (user_id,), ) - invite_3pid_list = config.get("invite_3pid", []) - visibility = config.get("visibility", None) is_public = visibility == "public" @@ -697,6 +714,7 @@ class RoomCreationHandler(BaseHandler): "invite", ratelimit=False, content=content, + new_room=True, ) for invite_3pid in invite_3pid_list: @@ -712,6 +730,7 @@ class RoomCreationHandler(BaseHandler): id_server, requester, txn_id=None, + new_room=True, id_access_token=id_access_token, ) @@ -769,6 +788,7 @@ class RoomCreationHandler(BaseHandler): "join", ratelimit=False, content=creator_join_profile, + new_room=True, ) # We treat the power levels override specially as this needs to be one @@ -830,6 +850,13 @@ class RoomCreationHandler(BaseHandler): for (etype, state_key), content in initial_state.items(): yield send(etype=etype, state_key=state_key, content=content) + if "encryption_alg" in config: + yield send( + etype=EventTypes.Encryption, + state_key="", + content={"algorithm": config["encryption_alg"]}, + ) + @defer.inlineCallbacks def _generate_room_id(self, creator_id, is_public): # autogen room IDs and try to create it. We may clash, so just diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
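The room.py preset change above means every private_chat and trusted_private_chat room starts encrypted: after the configured initial state is sent, creation emits one extra m.room.encryption state event carrying the preset's algorithm. A sketch of just that step (the event dicts are simplified; real creation goes through the event-creation handler):

# The two private-chat presets above carry this default; public rooms do not.
PRESET_ENCRYPTION = {
    "private_chat": "m.megolm.v1.aes-sha2",
    "trusted_private_chat": "m.megolm.v1.aes-sha2",
}


def extra_creation_events(preset: str) -> list:
    """Yield the extra m.room.encryption state event sent at room creation."""
    events = []
    alg = PRESET_ENCRYPTION.get(preset)
    if alg is not None:
        events.append({
            "type": "m.room.encryption",
            "state_key": "",
            "content": {"algorithm": alg},
        })
    return events


assert extra_creation_events("private_chat")[0]["content"]["algorithm"] == "m.megolm.v1.aes-sha2"
assert extra_creation_events("public_chat") == []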
index 7b7270fc61..9a58c14eb7 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py
@@ -24,13 +24,20 @@ from twisted.internet import defer from synapse import types from synapse.api.constants import EventTypes, Membership +from synapse.api.ratelimiting import Ratelimiter +from synapse.api.errors import ( + AuthError, + Codes, + HttpResponseException, + SynapseError, +) +from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header +from synapse.http.client import SimpleHttpClient -from synapse.api.errors import AuthError, Codes, SynapseError from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room -from ._base import BaseHandler - logger = logging.getLogger(__name__) @@ -60,6 +67,7 @@ class RoomMemberHandler(object): self.registration_handler = hs.get_registration_handler() self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.identity_handler = hs.get_handlers().identity_handler self.member_linearizer = Linearizer(name="member") @@ -67,13 +75,10 @@ class RoomMemberHandler(object): self.spam_checker = hs.get_spam_checker() self.third_party_event_rules = hs.get_third_party_event_rules() self._server_notices_mxid = self.config.server_notices_mxid + self.rewrite_identity_server_urls = self.config.rewrite_identity_server_urls self._enable_lookup = hs.config.enable_3pid_lookup self.allow_per_room_profiles = self.config.allow_per_room_profiles - - # This is only used to get at ratelimit function, and - # maybe_kick_guest_users. It's fine there are multiple of these as - # it doesn't store state. - self.base_handler = BaseHandler(hs) + self.ratelimiter = Ratelimiter() @abc.abstractmethod def _remote_join(self, requester, remote_room_hosts, room_id, user, content): @@ -265,8 +270,31 @@ class RoomMemberHandler(object): third_party_signed=None, ratelimit=True, content=None, + new_room=False, require_consent=True, ): + """Update a user's membership in a room + + Args: + requester (Requester) + target (UserID) + room_id (str) + action (str): The "action" the requester is performing against the + target. One of join/leave/kick/ban/invite/unban. + txn_id (str|None): The transaction ID associated with the request, + or None if not provided. + remote_room_hosts (list[str]|None): List of remote servers to try + and join via if server isn't already in the room. + third_party_signed (dict|None): The signed object for third party + invites. + ratelimit (bool): Whether to apply ratelimiting to this request. + content (dict|None): Fields to include in the new event's content. + new_room (bool): Whether these membership changes are happening + as part of a room creation (e.g.
initial joins and invites) + + Returns: + Deferred[FrozenEvent] + """ key = (room_id,) with (yield self.member_linearizer.queue(key)): @@ -280,6 +308,7 @@ class RoomMemberHandler(object): third_party_signed=third_party_signed, ratelimit=ratelimit, content=content, + new_room=new_room, require_consent=require_consent, ) @@ -297,6 +326,7 @@ class RoomMemberHandler(object): third_party_signed=None, ratelimit=True, content=None, + new_room=False, require_consent=True, ): content_specified = bool(content) @@ -361,8 +391,15 @@ class RoomMemberHandler(object): ) block_invite = True + is_published = yield self.store.is_room_published(room_id) + if not self.spam_checker.user_may_invite( - requester.user.to_string(), target.to_string(), room_id + requester.user.to_string(), + target.to_string(), + third_party_invite=None, + room_id=room_id, + new_room=new_room, + published_room=is_published, ): logger.info("Blocking invite due to spam checker") block_invite = True @@ -435,8 +472,26 @@ class RoomMemberHandler(object): # so don't really fit into the general auth process. raise AuthError(403, "Guest access not allowed") + if ( + self._server_notices_mxid is not None + and requester.user.to_string() == self._server_notices_mxid + ): + # allow the server notices mxid to join rooms + is_requester_admin = True + + else: + is_requester_admin = yield self.auth.is_server_admin(requester.user) + + inviter = yield self._get_inviter(target.to_string(), room_id) + if not is_requester_admin: + # We assume that if the spam checker allowed the user to create + # a room then they're allowed to join it. + if not new_room and not self.spam_checker.user_may_join_room( + target.to_string(), room_id, is_invited=inviter is not None + ): + raise SynapseError(403, "Not allowed to join this room") + if not is_host_in_room: - inviter = yield self._get_inviter(target.to_string(), room_id) if inviter and not self.hs.is_mine(inviter): remote_room_hosts.append(inviter.domain) @@ -705,6 +760,7 @@ class RoomMemberHandler(object): id_server, requester, txn_id, + new_room=False, id_access_token=None, ): if self.config.block_non_admin_invites: @@ -716,7 +772,23 @@ class RoomMemberHandler(object): # We need to rate limit *before* we send out any 3PID invites, so we # can't just rely on the standard ratelimiting of events. 
- yield self.base_handler.ratelimit(requester) + self.ratelimiter.ratelimit( + requester.user.to_string(), + time_now_s=self.hs.clock.time(), + rate_hz=self.hs.config.rc_third_party_invite.per_second, + burst_count=self.hs.config.rc_third_party_invite.burst_count, + update=True, + ) can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited( medium, address, room_id ) @@ -737,6 +809,19 @@ class RoomMemberHandler(object): id_server, medium, address, id_access_token ) + is_published = yield self.store.is_room_published(room_id) + + if not self.spam_checker.user_may_invite( + requester.user.to_string(), + invitee, + third_party_invite={"medium": medium, "address": address}, + room_id=room_id, + new_room=new_room, + published_room=is_published, + ): + logger.info("Blocking invite due to spam checker") + raise SynapseError(403, "Invites have been disabled on this server") + if invitee: yield self.update_membership( requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
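The room_member.py change above stops piggybacking on the generic event ratelimiter and gives third-party invites their own Ratelimiter keyed on the sender, fed by the rc_third_party_invite per_second/burst_count settings. A toy token bucket with those semantics (SimpleRatelimiter is illustrative, not Synapse's Ratelimiter, which raises LimitExceededError rather than returning False):

import time


class SimpleRatelimiter:
    """Token bucket: refill at rate_hz tokens/second, capped at burst_count."""

    def __init__(self, rate_hz, burst_count):
        self.rate_hz = rate_hz
        self.burst_count = burst_count
        self._buckets = {}  # key -> (token_count, last_update_ts)

    def can_do_action(self, key, now=None):
        now = time.time() if now is None else now
        tokens, last = self._buckets.get(key, (self.burst_count, now))
        # Refill first, then try to spend one token for this action.
        tokens = min(self.burst_count, tokens + (now - last) * self.rate_hz)
        if tokens < 1:
            self._buckets[key] = (tokens, now)
            return False
        self._buckets[key] = (tokens - 1, now)
        return True


limiter = SimpleRatelimiter(rate_hz=0.2, burst_count=10)
assert all(limiter.can_do_action("@a:hs", now=0) for _ in range(10))
assert not limiter.can_do_action("@a:hs", now=0)  # burst exhausted
assert limiter.can_do_action("@a:hs", now=5)      # one token refilled after 5s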
index cc9e6b9bd0..0082f85c26 100644 --- a/synapse/handlers/saml_handler.py +++ b/synapse/handlers/saml_handler.py
@@ -13,20 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import re +from typing import Tuple import attr import saml2 +import saml2.response from saml2.client import Saml2Client from synapse.api.errors import SynapseError +from synapse.config import ConfigError from synapse.http.servlet import parse_string from synapse.rest.client.v1.login import SSOAuthHandler -from synapse.types import UserID, map_username_to_mxid_localpart +from synapse.types import ( + UserID, + map_username_to_mxid_localpart, + mxid_localpart_allowed_characters, +) from synapse.util.async_helpers import Linearizer logger = logging.getLogger(__name__) +@attr.s +class Saml2SessionData: + """Data we track about SAML2 sessions""" + + # time the session was created, in milliseconds + creation_time = attr.ib() + + class SamlHandler: def __init__(self, hs): self._saml_client = Saml2Client(hs.config.saml2_sp_config) @@ -37,11 +53,14 @@ class SamlHandler: self._datastore = hs.get_datastore() self._hostname = hs.hostname self._saml2_session_lifetime = hs.config.saml2_session_lifetime - self._mxid_source_attribute = hs.config.saml2_mxid_source_attribute self._grandfathered_mxid_source_attribute = ( hs.config.saml2_grandfathered_mxid_source_attribute ) - self._mxid_mapper = hs.config.saml2_mxid_mapper + + # plugin to do custom mapping from saml response to mxid + self._user_mapping_provider = hs.config.saml2_user_mapping_provider_class( + hs.config.saml2_user_mapping_provider_config + ) # identifier for the external_ids table self._auth_provider_id = "saml" @@ -118,22 +137,10 @@ class SamlHandler: remote_user_id = saml2_auth.ava["uid"][0] except KeyError: logger.warning("SAML2 response lacks a 'uid' attestation") - raise SynapseError(400, "uid not in SAML2 response") - - try: - mxid_source = saml2_auth.ava[self._mxid_source_attribute][0] - except KeyError: - logger.warning( - "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute - ) - raise SynapseError( - 400, "%s not in SAML2 response" % (self._mxid_source_attribute,) - ) + raise SynapseError(400, "'uid' not in SAML2 response") self._outstanding_requests_dict.pop(saml2_auth.in_response_to, None) - displayName = saml2_auth.ava.get("displayName", [None])[0] - with (await self._mapping_lock.queue(self._auth_provider_id)): # first of all, check if we already have a mapping for this user logger.info( @@ -173,22 +180,46 @@ class SamlHandler: ) return registered_user_id - # figure out a new mxid for this user - base_mxid_localpart = self._mxid_mapper(mxid_source) + # Map saml response to user attributes using the configured mapping provider + for i in range(1000): + attribute_dict = self._user_mapping_provider.saml_response_to_user_attributes( + saml2_auth, i + ) + + logger.debug( + "Retrieved SAML attributes from user mapping provider: %s " + "(attempt %d)", + attribute_dict, + i, + ) + + localpart = attribute_dict.get("mxid_localpart") + if not localpart: + logger.error( + "SAML mapping provider plugin did not return a " + "mxid_localpart object" + ) + raise SynapseError(500, "Error parsing SAML2 response") - suffix = 0 - while True: - localpart = base_mxid_localpart + (str(suffix) if suffix else "") + displayname = attribute_dict.get("displayname") + + # Check if this mxid already exists if not await self._datastore.get_users_by_id_case_insensitive( UserID(localpart, self._hostname).to_string() ): + # This mxid is free break - suffix += 1 - logger.info("Allocating mxid for new user 
with localpart %s", localpart) + else: + # Unable to generate a username in 1000 iterations + # Break and return error to the user + raise SynapseError( + 500, "Unable to generate a Matrix ID from the SAML response" + ) registered_user_id = await self._registration_handler.register_user( - localpart=localpart, default_display_name=displayName + localpart=localpart, default_display_name=displayname ) + await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id ) @@ -205,9 +236,120 @@ class SamlHandler: del self._outstanding_requests_dict[reqid] +DOT_REPLACE_PATTERN = re.compile( + ("[^%s]" % (re.escape("".join(mxid_localpart_allowed_characters)),)) +) + + +def dot_replace_for_mxid(username: str) -> str: + username = username.lower() + username = DOT_REPLACE_PATTERN.sub(".", username) + + # regular mxids aren't allowed to start with an underscore either + username = re.sub("^_", "", username) + return username + + +MXID_MAPPER_MAP = { + "hexencode": map_username_to_mxid_localpart, + "dotreplace": dot_replace_for_mxid, +} + + @attr.s -class Saml2SessionData: - """Data we track about SAML2 sessions""" +class SamlConfig(object): + mxid_source_attribute = attr.ib() + mxid_mapper = attr.ib() - # time the session was created, in milliseconds - creation_time = attr.ib() + +class DefaultSamlMappingProvider(object): + __version__ = "0.0.1" + + def __init__(self, parsed_config: SamlConfig): + """The default SAML user mapping provider + + Args: + parsed_config: Module configuration + """ + self._mxid_source_attribute = parsed_config.mxid_source_attribute + self._mxid_mapper = parsed_config.mxid_mapper + + def saml_response_to_user_attributes( + self, saml_response: saml2.response.AuthnResponse, failures: int = 0, + ) -> dict: + """Maps some text from a SAML response to attributes of a new user + + Args: + saml_response: A SAML auth response object + + failures: How many times a call to this function with this + saml_response has resulted in a failure + + Returns: + dict: A dict containing new user attributes. Possible keys: + * mxid_localpart (str): Required. 
The localpart of the user's mxid + * displayname (str): The displayname of the user + """ + try: + mxid_source = saml_response.ava[self._mxid_source_attribute][0] + except KeyError: + logger.warning( + "SAML2 response lacks a '%s' attestation", self._mxid_source_attribute, + ) + raise SynapseError( + 400, "%s not in SAML2 response" % (self._mxid_source_attribute,) + ) + + # Use the configured mapper for this mxid_source + base_mxid_localpart = self._mxid_mapper(mxid_source) + + # Append suffix integer if last call to this function failed to produce + # a usable mxid + localpart = base_mxid_localpart + (str(failures) if failures else "") + + # Retrieve the display name from the saml response + # If displayname is None, the mxid_localpart will be used instead + displayname = saml_response.ava.get("displayName", [None])[0] + + return { + "mxid_localpart": localpart, + "displayname": displayname, + } + + @staticmethod + def parse_config(config: dict) -> SamlConfig: + """Parse the dict provided by the homeserver's config + Args: + config: A dictionary containing configuration options for this provider + Returns: + SamlConfig: A custom config object for this module + """ + # Parse config options and use defaults where necessary + mxid_source_attribute = config.get("mxid_source_attribute", "uid") + mapping_type = config.get("mxid_mapping", "hexencode") + + # Retrieve the associated mapping function + try: + mxid_mapper = MXID_MAPPER_MAP[mapping_type] + except KeyError: + raise ConfigError( + "saml2_config.user_mapping_provider.config: '%s' is not a valid " + "mxid_mapping value" % (mapping_type,) + ) + + return SamlConfig(mxid_source_attribute, mxid_mapper) + + @staticmethod + def get_saml_attributes(config: SamlConfig) -> Tuple[set, set]: + """Returns the required attributes of a SAML response + + Args: + config: A SamlConfig object containing configuration params for this provider + + Returns: + tuple[set,set]: The first set equates to the saml auth response + attributes that are required for the module to function, whereas the + second set consists of those attributes which can be used if + available, but are not necessary + """ + return {"uid", config.mxid_source_attribute}, {"displayName"} diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
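With the saml_handler.py refactor above, mxid generation is pluggable: the handler calls saml_response_to_user_attributes repeatedly with an incrementing failure count until it finds a free localpart. A skeleton third-party provider implementing the same hooks as DefaultSamlMappingProvider (the class name and config shape here are hypothetical):

class MyMappingProvider:
    """Skeleton of a custom SAML mapping provider using the hooks shown above."""

    __version__ = "0.0.1"

    def __init__(self, parsed_config):
        self._source_attr = parsed_config["source_attr"]

    def saml_response_to_user_attributes(self, saml_response, failures=0):
        # saml_response.ava maps attribute names to lists of values.
        source = saml_response.ava[self._source_attr][0].lower()
        return {
            # De-duplicate by appending the failure counter, as the default
            # provider above does.
            "mxid_localpart": source + (str(failures) if failures else ""),
            "displayname": saml_response.ava.get("displayName", [None])[0],
        }

    @staticmethod
    def parse_config(config):
        # Any shape works; the handler passes the result back to __init__.
        return {"source_attr": config.get("mxid_source_attribute", "uid")}

    @staticmethod
    def get_saml_attributes(config):
        # (required attributes, optional attributes)
        return {"uid", config["source_attr"]}, {"displayName"}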
index 56ed262a1f..ef750d1497 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py
@@ -21,7 +21,7 @@ from unpaddedbase64 import decode_base64, encode_base64 from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import SynapseError +from synapse.api.errors import NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client @@ -37,6 +37,7 @@ class SearchHandler(BaseHandler): self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state + self.auth = hs.get_auth() @defer.inlineCallbacks def get_old_rooms_from_upgraded_room(self, room_id): @@ -53,23 +54,38 @@ class SearchHandler(BaseHandler): room_id (str): id of the room to search through. Returns: - Deferred[iterable[unicode]]: predecessor room ids + Deferred[iterable[str]]: predecessor room ids """ historical_room_ids = [] - while True: - predecessor = yield self.store.get_room_predecessor(room_id) + # The initial room must have been known for us to get this far + predecessor = yield self.store.get_room_predecessor(room_id) - # If no predecessor, assume we've hit a dead end + while True: if not predecessor: + # We have reached the end of the chain of predecessors + break + + if not isinstance(predecessor.get("room_id"), str): + # This predecessor object is malformed. Exit here + break + + predecessor_room_id = predecessor["room_id"] + + # Don't add it to the list until we have checked that we are in the room + try: + next_predecessor_room = yield self.store.get_room_predecessor( + predecessor_room_id + ) + except NotFoundError: + # The predecessor is not a known room, so we are done here break - # Add predecessor's room ID - historical_room_ids.append(predecessor["room_id"]) + historical_room_ids.append(predecessor_room_id) - # Scan through the old room for further predecessors - room_id = predecessor["room_id"] + # And repeat + predecessor = next_predecessor_room return historical_room_ids diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
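The search.py rewrite above hardens the upgrade-chain walk: each predecessor is looked up before being added, so a malformed predecessor field or a room the server never knew about terminates the loop instead of poisoning the search. The same shape over a plain dict standing in for the store:

def predecessors(room_id, store):
    """Walk the upgrade chain as above: stop at missing or malformed links."""
    chain = []
    predecessor = store.get(room_id)
    while predecessor:
        prev_id = predecessor.get("room_id")
        if not isinstance(prev_id, str):
            break  # malformed predecessor entry
        if prev_id not in store:
            break  # predecessor is not a room we know about
        chain.append(prev_id)
        predecessor = store[prev_id]
    return chain


store = {
    "!v3:hs": {"room_id": "!v2:hs"},
    "!v2:hs": {"room_id": "!v1:hs"},
    "!v1:hs": None,  # the original room has no predecessor
}
assert predecessors("!v3:hs", store) == ["!v2:hs", "!v1:hs"]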
index d90c9e0108..3f50d6de47 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,12 +31,15 @@ class SetPasswordHandler(BaseHandler): super(SetPasswordHandler, self).__init__(hs) self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() + self._password_policy_handler = hs.get_password_policy_handler() @defer.inlineCallbacks def set_password(self, user_id, newpassword, requester=None): if not self.hs.config.password_localdb_enabled: raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN) + self._password_policy_handler.validate_password(newpassword) + password_hash = yield self._auth_handler.hash(newpassword) except_device_id = requester.device_id if requester else None diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py
index 03934956f4..c0b9384189 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py
@@ -171,7 +171,7 @@ class LogProducer(object): def stopProducing(self): self._paused = True - self._buffer = None + self._buffer = deque() def resumeProducing(self): self._paused = False diff --git a/synapse/logging/context.py b/synapse/logging/context.py
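The one-line _terse_json.py fix above keeps LogProducer._buffer a deque after stopProducing instead of setting it to None, so any later write or resume can still treat it as a queue. A minimal reproduction of the hazard (class and method names simplified from the hunk above):

from collections import deque


class Producer:
    def __init__(self):
        self._paused = False
        self._buffer = deque()

    def write(self, line):
        self._buffer.append(line)

    def stop_producing(self):
        self._paused = True
        # The fix above: reset to an empty deque, not None, so a message
        # logged after the producer stops doesn't crash in write().
        self._buffer = deque()

    def resume_producing(self):
        self._paused = False
        while self._buffer:
            print(self._buffer.popleft())


p = Producer()
p.write("before stop")
p.stop_producing()
p.write("logged after stop")  # with _buffer = None this would raise AttributeError
p.resume_producing()          # prints "logged after stop"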
index 2c1fb9ddac..33b322209d 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py
@@ -23,6 +23,7 @@ them. See doc/log_contexts.rst for details on how this works. """ +import inspect import logging import threading import types @@ -404,6 +405,9 @@ class LoggingContext(object): """ current = get_thread_resource_usage() + # Indicate to mypy that we know that self.usage_start is None. + assert self.usage_start is not None + utime_delta = current.ru_utime - self.usage_start.ru_utime stime_delta = current.ru_stime - self.usage_start.ru_stime @@ -612,7 +616,8 @@ def run_in_background(f, *args, **kwargs): def make_deferred_yieldable(deferred): - """Given a deferred, make it follow the Synapse logcontext rules: + """Given a deferred (or coroutine), make it follow the Synapse logcontext + rules: If the deferred has completed (or is not actually a Deferred), essentially does nothing (just returns another completed deferred with the @@ -624,6 +629,13 @@ def make_deferred_yieldable(deferred): (This is more-or-less the opposite operation to run_in_background.) """ + if inspect.isawaitable(deferred): + # If we're given a coroutine we convert it to a deferred so that we + # run it and find out if it immediately finishes, it it does then we + # don't need to fiddle with log contexts at all and can return + # immediately. + deferred = defer.ensureDeferred(deferred) + if not isinstance(deferred, defer.Deferred): return deferred diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 4a1fc2ec2b..14eca70ba4 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py
@@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha import ( keys, notifications, openid, + password_policy, read_marker, receipts, register, @@ -117,6 +118,7 @@ class ClientRestResource(JsonResource): room_upgrade_rest_servlet.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) + password_policy.register_servlets(hs, client_resource) relations.register_servlets(hs, client_resource) # moving to /_synapse/admin diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index e7fe50ed72..165313b572 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py
@@ -14,6 +14,7 @@ # limitations under the License. """ This module contains REST servlets to do with profile: /profile/<paths> """ +from twisted.internet import defer from synapse.api.errors import Codes, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -28,6 +29,7 @@ class ProfileDisplaynameRestServlet(RestServlet): super(ProfileDisplaynameRestServlet, self).__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() + self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() async def on_GET(self, request, user_id): @@ -63,11 +65,27 @@ class ProfileDisplaynameRestServlet(RestServlet): await self.profile_handler.set_displayname(user, requester, new_name, is_admin) + if self.hs.config.shadow_server: + shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs")) + self.shadow_displayname(shadow_user.to_string(), content) + return 200, {} def on_OPTIONS(self, request, user_id): return 200, {} + @defer.inlineCallbacks + def shadow_displayname(self, user_id, body): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.put_json( + "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s" + % (shadow_hs_url, user_id, as_token, user_id), + body, + ) + class ProfileAvatarURLRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True) @@ -76,6 +94,7 @@ class ProfileAvatarURLRestServlet(RestServlet): super(ProfileAvatarURLRestServlet, self).__init__() self.hs = hs self.profile_handler = hs.get_profile_handler() + self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() async def on_GET(self, request, user_id): @@ -114,11 +133,27 @@ class ProfileAvatarURLRestServlet(RestServlet): user, requester, new_avatar_url, is_admin ) + if self.hs.config.shadow_server: + shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs")) + self.shadow_avatar_url(shadow_user.to_string(), content) + return 200, {} def on_OPTIONS(self, request, user_id): return 200, {} + @defer.inlineCallbacks + def shadow_avatar_url(self, user_id, body): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.put_json( + "%s/_matrix/client/r0/profile/%s/avatar_url?access_token=%s&user_id=%s" + % (shadow_hs_url, user_id, as_token, user_id), + body, + ) + class ProfileRestServlet(RestServlet): PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 711d4ad304..44517b179e 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py
@@ -704,7 +704,8 @@ class RoomMembershipRestServlet(TransactionRestServlet): content["id_server"], requester, txn_id, - content.get("id_access_token"), + new_room=False, + id_access_token=content.get("id_access_token"), ) return 200, {} diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index fc240f5cf8..2085e3b777 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py
@@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018, 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,9 +15,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import re from six.moves import http_client +from twisted.internet import defer + from synapse.api.constants import LoginType from synapse.api.errors import Codes, SynapseError, ThreepidValidationError from synapse.config.emailconfig import ThreepidBehaviour @@ -28,8 +31,10 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.types import UserID from synapse.push.mailer import Mailer, load_jinja2_templates from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.stringutils import assert_valid_client_secret, random_string from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -81,6 +86,8 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Extract params from body client_secret = body["client_secret"] + assert_valid_client_secret(client_secret) + email = body["email"] send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param @@ -88,7 +95,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): if not check_3pid_allowed(self.hs, "email", email): raise SynapseError( 403, - "Your email domain is not authorized on this server", + "Your email is not authorized on this server", Codes.THREEPID_DENIED, ) @@ -167,6 +174,9 @@ class PasswordResetSubmitTokenServlet(RestServlet): sid = parse_string(request, "sid", required=True) client_secret = parse_string(request, "client_secret", required=True) + + assert_valid_client_secret(client_secret) + token = parse_string(request, "token", required=True) # Attempt to validate a 3PID session @@ -212,6 +222,7 @@ class PasswordRestServlet(RestServlet): self.auth_handler = hs.get_auth_handler() self.datastore = self.hs.get_datastore() self._set_password_handler = hs.get_set_password_handler() + self.http_client = hs.get_simple_http_client() @interactive_auth_handler async def on_POST(self, request): @@ -229,9 +240,13 @@ class PasswordRestServlet(RestServlet): if self.auth.has_access_token(request): requester = await self.auth.get_user_by_req(request) - params = await self.auth_handler.validate_user_via_ui_auth( - requester, body, self.hs.get_ip_from_request(request) - ) + # blindly trust ASes without UI-authing them + if requester.app_service: + params = body + else: + params = await self.auth_handler.validate_user_via_ui_auth( + requester, body, self.hs.get_ip_from_request(request) + ) user_id = requester.user.to_string() else: requester = None @@ -264,11 +279,29 @@ class PasswordRestServlet(RestServlet): await self._set_password_handler.set_password(user_id, new_password, requester) + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + self.shadow_password(params, shadow_user.to_string()) + return 200, {} def on_OPTIONS(self, _): return 200, {} + @defer.inlineCallbacks + def shadow_password(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield 
self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/password?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_patterns("/account/deactivate$") @@ -357,13 +390,15 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", email): + if not (await check_3pid_allowed(self.hs, "email", email)): raise SynapseError( 403, - "Your email domain is not authorized on this server", + "Your email is not authorized on this server", Codes.THREEPID_DENIED, ) + assert_valid_client_secret(body["client_secret"]) + existing_user_id = await self.store.get_user_id_by_threepid( "email", body["email"] ) @@ -420,13 +455,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): msisdn = phone_number_to_msisdn(country, phone_number) - if not check_3pid_allowed(self.hs, "msisdn", msisdn): + if not (await check_3pid_allowed(self.hs, "msisdn", msisdn)): raise SynapseError( 403, "Account phone numbers are not authorized on this server", Codes.THREEPID_DENIED, ) + assert_valid_client_secret(body["client_secret"]) + existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) if existing_user_id is not None: @@ -579,7 +616,8 @@ class ThreepidRestServlet(RestServlet): self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() - self.datastore = self.hs.get_datastore() + self.datastore = hs.get_datastore() + self.http_client = hs.get_simple_http_client() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) @@ -589,10 +627,33 @@ class ThreepidRestServlet(RestServlet): return 200, {"threepids": threepids} async def on_POST(self, request): + if self.hs.config.disable_3pid_changes: + raise SynapseError(400, "3PID changes disabled on this server") + requester = await self.auth.get_user_by_req(request) user_id = requester.user.to_string() body = parse_json_object_from_request(request) + # skip validation if this is a shadow 3PID from an AS + if requester.app_service: + # XXX: ASes pass in a validated threepid directly to bypass the IS. + # This makes the API entirely change shape when we have an AS token; + # it really should be an entirely separate API - perhaps + # /account/3pid/replicate or something. 
+ threepid = body.get("threepid") + + await self.auth_handler.add_threepid( + user_id, threepid["medium"], threepid["address"], threepid["validated_at"] + ) + + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) + + return 200, {} + threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds") if threepid_creds is None: raise SynapseError( @@ -613,12 +674,36 @@ class ThreepidRestServlet(RestServlet): validation_session["address"], validation_session["validated_at"], ) + + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + threepid = { + "medium": validation_session["medium"], + "address": validation_session["address"], + "validated_at": validation_session["validated_at"], + } + self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) + return 200, {} raise SynapseError( 400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED ) + @defer.inlineCallbacks + def shadow_3pid(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + class ThreepidAddRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/add$", releases=(), unstable=True) @@ -654,6 +739,16 @@ class ThreepidAddRestServlet(RestServlet): validation_session["address"], validation_session["validated_at"], ) + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + threepid = { + "medium": validation_session["medium"], + "address": validation_session["address"], + "validated_at": validation_session["validated_at"], + } + self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) return 200, {} raise SynapseError( @@ -688,6 +783,29 @@ class ThreepidBindRestServlet(RestServlet): return 200, {} + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + threepid = { + "medium": validation_session["medium"], + "address": validation_session["address"], + "validated_at": validation_session["validated_at"], + } + self.shadow_3pid({"threepid": threepid}, shadow_user.to_string()) + + @defer.inlineCallbacks + def shadow_3pid(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + class ThreepidUnbindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/unbind$", releases=(), unstable=True) @@ -725,10 +843,15 @@ class ThreepidDeleteRestServlet(RestServlet): def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() + self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() + self.http_client = hs.get_simple_http_client() async def on_POST(self, request): + if self.hs.config.disable_3pid_changes: + raise SynapseError(400, "3PID changes disabled on this server") + body = parse_json_object_from_request(request) assert_params_in_dict(body, ["medium", 
"address"]) @@ -746,6 +869,12 @@ class ThreepidDeleteRestServlet(RestServlet): logger.exception("Failed to remove threepid") raise SynapseError(500, "Failed to remove threepid") + if self.hs.config.shadow_server: + shadow_user = UserID( + requester.user.localpart, self.hs.config.shadow_server.get("hs") + ) + self.shadow_3pid_delete(body, shadow_user.to_string()) + if ret: id_server_unbind_result = "success" else: @@ -753,6 +882,77 @@ class ThreepidDeleteRestServlet(RestServlet): return 200, {"id_server_unbind_result": id_server_unbind_result} + @defer.inlineCallbacks + def shadow_3pid_delete(self, body, user_id): + # TODO: retries + shadow_hs_url = self.hs.config.shadow_server.get("hs_url") + as_token = self.hs.config.shadow_server.get("as_token") + + yield self.http_client.post_json_get_json( + "%s/_matrix/client/r0/account/3pid/delete?access_token=%s&user_id=%s" + % (shadow_hs_url, as_token, user_id), + body, + ) + + +class ThreepidLookupRestServlet(RestServlet): + PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/lookup$")] + + def __init__(self, hs): + super(ThreepidLookupRestServlet, self).__init__() + self.auth = hs.get_auth() + self.identity_handler = hs.get_handlers().identity_handler + + @defer.inlineCallbacks + def on_GET(self, request): + """Proxy a /_matrix/identity/api/v1/lookup request to an identity + server + """ + yield self.auth.get_user_by_req(request) + + # Verify query parameters + query_params = request.args + assert_params_in_dict(query_params, [b"medium", b"address", b"id_server"]) + + # Retrieve needed information from query parameters + medium = parse_string(request, "medium") + address = parse_string(request, "address") + id_server = parse_string(request, "id_server") + + # Proxy the request to the identity server. lookup_3pid handles checking + # if the lookup is allowed so we don't need to do it here. + ret = yield self.identity_handler.proxy_lookup_3pid(id_server, medium, address) + + defer.returnValue((200, ret)) + + +class ThreepidBulkLookupRestServlet(RestServlet): + PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/bulk_lookup$")] + + def __init__(self, hs): + super(ThreepidBulkLookupRestServlet, self).__init__() + self.auth = hs.get_auth() + self.identity_handler = hs.get_handlers().identity_handler + + @defer.inlineCallbacks + def on_POST(self, request): + """Proxy a /_matrix/identity/api/v1/bulk_lookup request to an identity + server + """ + yield self.auth.get_user_by_req(request) + + body = parse_json_object_from_request(request) + + assert_params_in_dict(body, ["threepids", "id_server"]) + + # Proxy the request to the identity server. lookup_3pid handles checking + # if the lookup is allowed so we don't need to do it here. + ret = yield self.identity_handler.proxy_bulk_lookup_3pid( + body["id_server"], body["threepids"] + ) + + defer.returnValue((200, ret)) + class WhoamiRestServlet(RestServlet): PATTERNS = client_patterns("/account/whoami$") @@ -781,4 +981,6 @@ def register_servlets(hs, http_server): ThreepidBindRestServlet(hs).register(http_server) ThreepidUnbindRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) + ThreepidLookupRestServlet(hs).register(http_server) + ThreepidBulkLookupRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index 64eb7fec3b..17495f020b 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -15,8 +15,11 @@ import logging +from twisted.internet import defer + from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.types import UserID from ._base import client_patterns @@ -38,6 +41,7 @@ class AccountDataServlet(RestServlet): self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() + self._profile_handler = hs.get_profile_handler() async def on_PUT(self, request, user_id, account_data_type): requester = await self.auth.get_user_by_req(request) @@ -46,6 +50,11 @@ class AccountDataServlet(RestServlet): body = parse_json_object_from_request(request) + if account_data_type == "im.vector.hide_profile": + user = UserID.from_string(user_id) + hide_profile = body.get("hide_profile") + await self._profile_handler.set_active(user, not hide_profile, True) + max_id = await self.store.add_account_data_for_user( user_id, account_data_type, body ) diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py new file mode 100644
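For context, the im.vector.hide_profile account-data type intercepted above is written by clients through the ordinary account-data endpoint; the servlet peeks at the body and flips the profile's active flag before storing it as normal. A hypothetical client-side call (homeserver URL, user ID and access token are placeholders; uses the requests package for brevity):

    import requests

    def set_hide_profile(base_url, user_id, access_token, hide):
        url = "%s/_matrix/client/r0/user/%s/account_data/im.vector.hide_profile" % (base_url, user_id)
        resp = requests.put(
            url,
            params={"access_token": access_token},
            # the servlet calls set_active(user, not hide_profile, True)
            json={"hide_profile": hide},
        )
        resp.raise_for_status()
        return resp.json()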
index 0000000000..968403cca4 --- /dev/null +++ b/synapse/rest/client/v2_alpha/password_policy.py
@@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.http.servlet import RestServlet + +from ._base import client_patterns + +logger = logging.getLogger(__name__) + + +class PasswordPolicyServlet(RestServlet): + PATTERNS = client_patterns("/password_policy$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(PasswordPolicyServlet, self).__init__() + + self.policy = hs.config.password_policy + self.enabled = hs.config.password_policy_enabled + + def on_GET(self, request): + if not self.enabled or not self.policy: + return (200, {}) + + policy = {} + + for param in [ + "minimum_length", + "require_digit", + "require_symbol", + "require_lowercase", + "require_uppercase", + ]: + if param in self.policy: + policy["m.%s" % param] = self.policy[param] + + return (200, policy) + + +def register_servlets(hs, http_server): + PasswordPolicyServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
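The servlet above only advertises the policy; enforcement happens server-side in the password policy handler when a password is actually set. To make the advertised m.* fields concrete, here is a client-side validator sketch; the regex semantics attached to each field are an assumption about their intent, not something taken from this diff:

    import re

    def password_satisfies_policy(password, policy):
        # policy is the JSON object returned by GET /password_policy,
        # e.g. {"m.minimum_length": 10, "m.require_digit": True}
        if len(password) < policy.get("m.minimum_length", 0):
            return False
        checks = {
            "m.require_digit": r"\d",
            "m.require_symbol": r"[^a-zA-Z0-9]",
            "m.require_lowercase": r"[a-z]",
            "m.require_uppercase": r"[A-Z]",
        }
        for field, pattern in checks.items():
            if policy.get(field) and not re.search(pattern, password):
                return False
        return True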
index 66de16a1fa..4a2406e9c9 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py
@@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015 - 2016 OpenMarket Ltd -# Copyright 2017 Vector Creations Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +17,7 @@ import hmac import logging +import re from typing import List, Union from six import string_types @@ -48,6 +50,7 @@ from synapse.http.servlet import ( from synapse.push.mailer import load_jinja2_templates from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter +from synapse.util.stringutils import assert_valid_client_secret from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -119,10 +122,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", email): + if not (await check_3pid_allowed(self.hs, "email", body["email"])): raise SynapseError( 403, - "Your email domain is not authorized to register on this server", + "Your email is not authorized to register on this server", Codes.THREEPID_DENIED, ) @@ -186,7 +189,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): msisdn = phone_number_to_msisdn(country, phone_number) - if not check_3pid_allowed(self.hs, "msisdn", msisdn): + assert_valid_client_secret(body["client_secret"]) + + if not (await check_3pid_allowed(self.hs, "msisdn", msisdn)): raise SynapseError( 403, "Phone numbers are not authorized to register on this server", @@ -369,6 +374,7 @@ class RegisterRestServlet(RestServlet): self.room_member_handler = hs.get_room_member_handler() self.macaroon_gen = hs.get_macaroon_generator() self.ratelimiter = hs.get_registration_ratelimiter() + self.password_policy_handler = hs.get_password_policy_handler() self.clock = hs.get_clock() self._registration_flows = _calculate_registration_flows( @@ -410,12 +416,15 @@ class RegisterRestServlet(RestServlet): # we do basic sanity checks here because the auth layer will store these # in sessions. Pull out the username/password provided to us. 
+ desired_password = None if "password" in body: if ( not isinstance(body["password"], string_types) or len(body["password"]) > 512 ): raise SynapseError(400, "Invalid password") + self.password_policy_handler.validate_password(body["password"]) + desired_password = body["password"] desired_username = None if "username" in body: @@ -426,6 +435,8 @@ class RegisterRestServlet(RestServlet): raise SynapseError(400, "Invalid username") desired_username = body["username"] + desired_display_name = body.get("display_name") + appservice = None if self.auth.has_access_token(request): appservice = await self.auth.get_appservice_by_req(request) @@ -449,7 +460,11 @@ class RegisterRestServlet(RestServlet): if isinstance(desired_username, string_types): result = await self._do_appservice_registration( - desired_username, access_token, body + desired_username, + desired_password, + desired_display_name, + access_token, + body, ) return 200, result # we throw for non 200 responses @@ -510,7 +525,7 @@ class RegisterRestServlet(RestServlet): medium = auth_result[login_type]["medium"] address = auth_result[login_type]["address"] - if not check_3pid_allowed(self.hs, medium, address): + if not (await check_3pid_allowed(self.hs, medium, address)): raise SynapseError( 403, "Third party identifiers (email/phone numbers)" @@ -518,6 +533,80 @@ class RegisterRestServlet(RestServlet): Codes.THREEPID_DENIED, ) + existingUid = await self.store.get_user_id_by_threepid( + medium, address + ) + + if existingUid is not None: + raise SynapseError( + 400, "%s is already in use" % medium, Codes.THREEPID_IN_USE + ) + + if self.hs.config.register_mxid_from_3pid: + # override the desired_username based on the 3PID if any. + # reset it first to avoid folks picking their own username. + desired_username = None + + # we should have an auth_result at this point if we're going to progress + # to register the user (i.e. we haven't picked up a registered_user_id + # from our session store), in which case get ready and gen the + # desired_username + if auth_result: + if ( + self.hs.config.register_mxid_from_3pid == "email" + and LoginType.EMAIL_IDENTITY in auth_result + ): + address = auth_result[LoginType.EMAIL_IDENTITY]["address"] + desired_username = synapse.types.strip_invalid_mxid_characters( + address.replace("@", "-").lower() + ) + + # find a unique mxid for the account, suffixing numbers + # if needed + while True: + try: + await self.registration_handler.check_username( + desired_username, + guest_access_token=guest_access_token, + assigned_user_id=registered_user_id, + ) + # if we got this far we passed the check. + break + except SynapseError as e: + if e.errcode == Codes.USER_IN_USE: + m = re.match(r"^(.*?)(\d+)$", desired_username) + if m: + desired_username = m.group(1) + str( + int(m.group(2)) + 1 + ) + else: + desired_username += "1" + else: + # something else went wrong. 
+ break + + if self.hs.config.register_just_use_email_for_display_name: + desired_display_name = address + else: + # Custom mapping between email address and display name + desired_display_name = self._map_email_to_displayname(address) + elif ( + self.hs.config.register_mxid_from_3pid == "msisdn" + and LoginType.MSISDN in auth_result + ): + desired_username = auth_result[LoginType.MSISDN]["address"] + else: + raise SynapseError( + 400, "Cannot derive mxid from 3pid; no recognised 3pid" + ) + + if desired_username is not None: + await self.registration_handler.check_username( + desired_username, + guest_access_token=guest_access_token, + assigned_user_id=registered_user_id, + ) + if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", registered_user_id @@ -528,9 +617,16 @@ class RegisterRestServlet(RestServlet): # NB: This may be from the auth handler and NOT from the POST assert_params_in_dict(params, ["password"]) - desired_username = params.get("username", None) + if not self.hs.config.register_mxid_from_3pid: + desired_username = params.get("username", None) + else: + # we keep the original desired_username derived from the 3pid above + pass + guest_access_token = params.get("guest_access_token", None) - new_password = params.get("password", None) + + # XXX: don't we need to validate these for length etc like we did on + # the ones from the JSON body earlier on in the method? if desired_username is not None: desired_username = desired_username.lower() @@ -563,8 +659,9 @@ class RegisterRestServlet(RestServlet): registered_user_id = await self.registration_handler.register_user( localpart=desired_username, - password=new_password, + password=params.get("password", None), guest_access_token=guest_access_token, + default_display_name=desired_display_name, threepid=threepid, address=client_addr, ) @@ -576,6 +673,14 @@ class RegisterRestServlet(RestServlet): ): await self.store.upsert_monthly_active_user(registered_user_id) + if self.hs.config.shadow_server: + await self.registration_handler.shadow_register( + localpart=desired_username, + display_name=desired_display_name, + auth_result=auth_result, + params=params, + ) + # remember that we've now registered that user account, and with # what user ID (since the user may not have specified) self.auth_handler.set_session_data( @@ -600,11 +705,30 @@ class RegisterRestServlet(RestServlet): def on_OPTIONS(self, _): return 200, {} - async def _do_appservice_registration(self, username, as_token, body): + async def _do_appservice_registration( + self, username, password, display_name, as_token, body + ): + # FIXME: appservice_register() is horribly duplicated with register() + # and they should probably just be combined together with a config flag. 
user_id = await self.registration_handler.appservice_register( - username, as_token + username, as_token, password, display_name ) - return await self._create_registration_details(user_id, body) + result = await self._create_registration_details(user_id, body) + + auth_result = body.get("auth_result") + if auth_result and LoginType.EMAIL_IDENTITY in auth_result: + threepid = auth_result[LoginType.EMAIL_IDENTITY] + await self._register_email_threepid( + user_id, threepid, result["access_token"], body.get("bind_email") + ) + + if auth_result and LoginType.MSISDN in auth_result: + threepid = auth_result[LoginType.MSISDN] + await self._register_msisdn_threepid( + user_id, threepid, result["access_token"], body.get("bind_msisdn") + ) + + return result async def _create_registration_details(self, user_id, params): """Complete registration of newly-registered user @@ -655,6 +779,60 @@ class RegisterRestServlet(RestServlet): ) +def cap(name): + """Capitalise parts of a name containing different words, including those + separated by hyphens. + For example, 'john-doe' becomes 'John-Doe'. + + Args: + name (str): The name to parse + """ + if not name: + return name + + # Split the name by whitespace then hyphens, capitalizing each part then + # joining it back together. + capitalized_name = " ".join( + "-".join(part.capitalize() for part in space_part.split("-")) + for space_part in name.split() + ) + return capitalized_name + + +def _map_email_to_displayname(address): + """Custom mapping from an email address to a user displayname + + Args: + address (str): The email address to process + Returns: + str: The new displayname + """ + # Split the part before and after the @ in the email. + # Replace all . with spaces in the first part + parts = address.replace(".", " ").split("@") + + # Figure out which org this email address belongs to + org_parts = parts[1].split(" ") + + # If this is a ...matrix.org email, mark them as an Admin + if org_parts[-2] == "matrix" and org_parts[-1] == "org": + org = "Tchap Admin" + + # If this is a ...gouv.fr address, set the org to whatever is before + # gouv.fr. If there isn't anything (a @gouv.fr email) simply mark their + # org as "gouv" + elif org_parts[-2] == "gouv" and org_parts[-1] == "fr": + org = org_parts[-3] if len(org_parts) > 2 else org_parts[-2] + + # Otherwise, mark their org as the email's second-level domain name + else: + org = org_parts[-2] + + desired_display_name = cap(parts[0]) + " [" + cap(org) + "]" + + return desired_display_name + + def _calculate_registration_flows( # technically `config` has to provide *all* of these interfaces, not just one config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig], diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
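Tracing cap and _map_email_to_displayname by hand makes the mapping concrete; the outputs below are derived from the helpers above:

    cap("john-doe smith")                                        # -> "John-Doe Smith"
    _map_email_to_displayname("bob@matrix.org")                  # -> "Bob [Tchap Admin]"
    _map_email_to_displayname("jane.doe@modernisation.gouv.fr")  # -> "Jane Doe [Modernisation]"
    _map_email_to_displayname("alice.smith@example.com")         # -> "Alice Smith [Example]"

Note that a bare @gouv.fr address falls back to org_parts[-2], i.e. an org of "gouv", as the comment in the helper says.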
index bef91a2d3e..faf9dbdea4 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -15,8 +15,13 @@ import logging +from signedjson.sign import sign_json + +from twisted.internet import defer + from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.types import UserID from ._base import client_patterns @@ -35,6 +40,7 @@ class UserDirectorySearchRestServlet(RestServlet): self.hs = hs self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() + self.http_client = hs.get_simple_http_client() async def on_POST(self, request): """Searches for users in directory @@ -61,6 +67,16 @@ class UserDirectorySearchRestServlet(RestServlet): body = parse_json_object_from_request(request) + if self.hs.config.user_directory_defer_to_id_server: + signed_body = sign_json( + body, self.hs.hostname, self.hs.config.signing_key[0] + ) + url = "%s/_matrix/identity/api/v1/user_directory/search" % ( + self.hs.config.user_directory_defer_to_id_server, + ) + resp = await self.http_client.post_json_get_json(url, signed_body) + defer.returnValue((200, resp)) + limit = body.get("limit", 10) limit = min(limit, 50) @@ -76,5 +92,87 @@ class UserDirectorySearchRestServlet(RestServlet): return 200, results +class UserInfoServlet(RestServlet): + """ + GET /user/{user_id}/info HTTP/1.1 + """ + + PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/info$") + + def __init__(self, hs): + super(UserInfoServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.notifier = hs.get_notifier() + self.clock = hs.get_clock() + self.transport_layer = hs.get_federation_transport_client() + registry = hs.get_federation_registry() + + if not registry.query_handlers.get("user_info"): + registry.register_query_handler("user_info", self._on_federation_query) + + @defer.inlineCallbacks + def on_GET(self, request, user_id): + # Ensure the user is authenticated + yield self.auth.get_user_by_req(request, allow_guest=False) + + user = UserID.from_string(user_id) + if not self.hs.is_mine(user): + # Attempt to make a federation request to the server that owns this user + args = {"user_id": user_id} + res = yield self.transport_layer.make_query( + user.domain, "user_info", args, retry_on_dns_fail=True + ) + defer.returnValue((200, res)) + + res = yield self._get_user_info(user_id) + defer.returnValue((200, res)) + + @defer.inlineCallbacks + def _on_federation_query(self, args): + """Called when a request for user information appears over federation + + Args: + args (dict): Dictionary of query arguments provided by the request + + Returns: + Deferred[dict]: Deactivation and expiration information for a given user + """ + user_id = args.get("user_id") + if not user_id: + raise SynapseError(400, "user_id not provided") + + user = UserID.from_string(user_id) + if not self.hs.is_mine(user): + raise SynapseError(400, "User is not hosted on this homeserver") + + res = yield self._get_user_info(user_id) + defer.returnValue(res) + + @defer.inlineCallbacks + def _get_user_info(self, user_id): + """Retrieve information about a given user + + Args: + user_id (str): The User ID of a given user on this homeserver + + Returns: + Deferred[dict]: Deactivation and expiration information for a given user + """ + # Check whether user is deactivated + is_deactivated = yield self.store.get_user_deactivated_status(user_id) + + # Check whether user is expired + expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) + is_expired = ( + expiration_ts is not None and self.clock.time_msec() 
>= expiration_ts + ) + + res = {"expired": is_expired, "deactivated": is_deactivated} + defer.returnValue(res) + + def register_servlets(hs, http_server): UserDirectorySearchRestServlet(hs).register(http_server) + UserInfoServlet(hs).register(http_server) diff --git a/synapse/rulecheck/__init__.py b/synapse/rulecheck/__init__.py new file mode 100644
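The new /user/{user_id}/info endpoint returns a two-field JSON object, {"expired": bool, "deactivated": bool}, answering locally for local users and via the user_info federation query for remote ones. A hypothetical client-side call (base URL and token are placeholders; uses the requests package for brevity):

    import requests

    def get_user_info(base_url, access_token, user_id):
        resp = requests.get(
            "%s/_matrix/client/r0/user/%s/info" % (base_url, user_id),
            headers={"Authorization": "Bearer %s" % access_token},
        )
        resp.raise_for_status()
        info = resp.json()
        return info["deactivated"], info["expired"]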
index 0000000000..e69de29bb2 --- /dev/null +++ b/synapse/rulecheck/__init__.py
diff --git a/synapse/rulecheck/domain_rule_checker.py b/synapse/rulecheck/domain_rule_checker.py new file mode 100644
index 0000000000..6f2a1931c5 --- /dev/null +++ b/synapse/rulecheck/domain_rule_checker.py
@@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.config._base import ConfigError + +logger = logging.getLogger(__name__) + + +class DomainRuleChecker(object): + """ + A re-implementation of the SpamChecker that prevents users in one domain from + inviting users in other domains to rooms, based on a configuration. + + Takes a config in the format: + + spam_checker: + module: "rulecheck.DomainRuleChecker" + config: + domain_mapping: + "inviter_domain": [ "invitee_domain_permitted", "other_domain_permitted" ] + "other_inviter_domain": [ "invitee_domain_permitted" ] + default: False + + # Only let local users join rooms if they were explicitly invited. + can_only_join_rooms_with_invite: false + + # Only let local users create rooms if they are inviting only one + # other user, and that user matches the rules above. + can_only_create_one_to_one_rooms: false + + # Only let local users invite during room creation, regardless of the + # domain mapping rules above. + can_only_invite_during_room_creation: false + + # Prevent local users from inviting users from certain domains to + # rooms published in the room directory. + domains_prevented_from_being_invited_to_published_rooms: [] + + # Allow third party invites + can_invite_by_third_party_id: true + + Don't forget to consider if you can invite users from your own domain. + """ + + def __init__(self, config): + self.domain_mapping = config["domain_mapping"] or {} + self.default = config["default"] + + self.can_only_join_rooms_with_invite = config.get( + "can_only_join_rooms_with_invite", False + ) + self.can_only_create_one_to_one_rooms = config.get( + "can_only_create_one_to_one_rooms", False + ) + self.can_only_invite_during_room_creation = config.get( + "can_only_invite_during_room_creation", False + ) + self.can_invite_by_third_party_id = config.get( + "can_invite_by_third_party_id", True + ) + self.domains_prevented_from_being_invited_to_published_rooms = config.get( + "domains_prevented_from_being_invited_to_published_rooms", [] + ) + + def check_event_for_spam(self, event): + """Implements synapse.events.SpamChecker.check_event_for_spam + """ + return False + + def user_may_invite( + self, + inviter_userid, + invitee_userid, + third_party_invite, + room_id, + new_room, + published_room=False, + ): + """Implements synapse.events.SpamChecker.user_may_invite + """ + if self.can_only_invite_during_room_creation and not new_room: + return False + + if not self.can_invite_by_third_party_id and third_party_invite: + return False + + # This is a third party invite (without a bound mxid), so unless we have + # banned all third party invites (above) we allow it. 
+ if not invitee_userid: + return True + + inviter_domain = self._get_domain_from_id(inviter_userid) + invitee_domain = self._get_domain_from_id(invitee_userid) + + if inviter_domain not in self.domain_mapping: + return self.default + + if ( + published_room + and invitee_domain + in self.domains_prevented_from_being_invited_to_published_rooms + ): + return False + + return invitee_domain in self.domain_mapping[inviter_domain] + + def user_may_create_room( + self, userid, invite_list, third_party_invite_list, cloning + ): + """Implements synapse.events.SpamChecker.user_may_create_room + """ + + if cloning: + return True + + if not self.can_invite_by_third_party_id and third_party_invite_list: + return False + + number_of_invites = len(invite_list) + len(third_party_invite_list) + + if self.can_only_create_one_to_one_rooms and number_of_invites != 1: + return False + + return True + + def user_may_create_room_alias(self, userid, room_alias): + """Implements synapse.events.SpamChecker.user_may_create_room_alias + """ + return True + + def user_may_publish_room(self, userid, room_id): + """Implements synapse.events.SpamChecker.user_may_publish_room + """ + return True + + def user_may_join_room(self, userid, room_id, is_invited): + """Implements synapse.events.SpamChecker.user_may_join_room + """ + if self.can_only_join_rooms_with_invite and not is_invited: + return False + + return True + + @staticmethod + def parse_config(config): + """Implements synapse.events.SpamChecker.parse_config + """ + if "default" in config: + return config + else: + raise ConfigError("No default set for spam_config DomainRuleChecker") + + @staticmethod + def _get_domain_from_id(mxid): + """Parses a string and returns the domain part of the mxid. + + Args: + mxid (str): a valid mxid + + Returns: + str: the domain part of the mxid + + """ + idx = mxid.find(":") + if idx == -1: + raise Exception("Invalid ID: %r" % (mxid,)) + return mxid[idx + 1 :] diff --git a/synapse/server.py b/synapse/server.py
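To see the DomainRuleChecker's mapping logic in action, a small usage example based on the config format from its docstring (the domains are invented):

    config = {
        "domain_mapping": {"example.com": ["other.example.com"]},
        "default": False,
    }
    checker = DomainRuleChecker(DomainRuleChecker.parse_config(config))

    # example.com users may invite other.example.com users into a room...
    checker.user_may_invite(
        "@a:example.com", "@b:other.example.com", None, "!room:example.com", new_room=False
    )  # -> True

    # ...but an inviter domain absent from the mapping falls back to `default`.
    checker.user_may_invite(
        "@x:unknown.org", "@b:other.example.com", None, "!room:example.com", new_room=False
    )  # -> False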
index 2db3dab221..5f4c330e2e 100644 --- a/synapse/server.py +++ b/synapse/server.py
@@ -34,6 +34,7 @@ from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter from synapse.appservice.api import ApplicationServiceApi from synapse.appservice.scheduler import ApplicationServiceScheduler +from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory @@ -66,6 +67,7 @@ from synapse.handlers.groups_local import GroupsLocalHandler from synapse.handlers.initial_sync import InitialSyncHandler from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler +from synapse.handlers.password_policy import PasswordPolicyHandler from synapse.handlers.presence import PresenceHandler from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler @@ -97,6 +99,7 @@ from synapse.server_notices.worker_server_notices_sender import ( ) from synapse.state import StateHandler, StateResolutionHandler from synapse.storage import DataStores, Storage +from synapse.storage.engines import create_engine from synapse.streams.events import EventSources from synapse.util import Clock from synapse.util.distributor import Distributor @@ -168,6 +171,7 @@ class HomeServer(object): "event_builder_factory", "filtering", "http_client_context_factory", + "proxied_http_client", "simple_http_client", "proxied_http_client", "media_repository", @@ -200,6 +204,7 @@ class HomeServer(object): "saml_handler", "event_client_serializer", "storage", + "password_policy_handler", ] REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"] @@ -209,16 +214,18 @@ class HomeServer(object): # instantiated during setup() for future return by get_datastore() DATASTORE_CLASS = abc.abstractproperty() - def __init__(self, hostname, reactor=None, **kwargs): + def __init__(self, hostname: str, config: HomeServerConfig, reactor=None, **kwargs): """ Args: hostname : The hostname for the server. + config: The full config for the homeserver. """ if not reactor: from twisted.internet import reactor self._reactor = reactor self.hostname = hostname + self.config = config self._building = {} self._listening_services = [] self.start_time = None @@ -229,6 +236,12 @@ class HomeServer(object): self.admin_redaction_ratelimiter = Ratelimiter() self.registration_ratelimiter = Ratelimiter() + self.database_engine = create_engine(config.database_config) + config.database_config.setdefault("args", {})[ + "cp_openfun" + ] = self.database_engine.on_new_connection + self.db_config = config.database_config + self.datastores = None # Other kwargs are explicit dependencies @@ -553,6 +566,9 @@ class HomeServer(object): def build_storage(self) -> Storage: return Storage(self, self.datastores) + def build_password_policy_handler(self): + return PasswordPolicyHandler(self) + def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 0e75e94c6f..5accc071ab 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py
@@ -32,6 +32,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.logging.utils import log_function from synapse.state import v1, v2 +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.util.async_helpers import Linearizer from synapse.util.caches import get_cache_factor_for from synapse.util.caches.expiringcache import ExpiringCache @@ -655,7 +656,7 @@ class StateResolutionStore(object): return self.store.get_events( event_ids, - check_redacted=False, + redact_behaviour=EventRedactBehaviour.AS_IS, get_prev_content=False, allow_rejected=allow_rejected, ) diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/data_stores/main/appservice.py
index b2f39649fd..340de45e99 100644 --- a/synapse/storage/data_stores/main/appservice.py +++ b/synapse/storage/data_stores/main/appservice.py
@@ -35,7 +35,7 @@ def _make_exclusive_regex(services_cache): exclusive_user_regexes = [ regex.pattern for service in services_cache - for regex in service.get_exlusive_user_regexes() + for regex in service.get_exclusive_user_regexes() ] if exclusive_user_regexes: exclusive_user_regex = "|".join("(" + r + ")" for r in exclusive_user_regexes) diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py
index 320c5b0f07..add3037b69 100644 --- a/synapse/storage/data_stores/main/client_ips.py +++ b/synapse/storage/data_stores/main/client_ips.py
@@ -451,16 +451,18 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): # Technically an access token might not be associated with # a device so we need to check. if device_id: - self.db.simple_upsert_txn( + # this is always an update rather than an upsert: the row should + # already exist, and if it doesn't, that may be because it has been + # deleted, and we don't want to re-create it. + self.db.simple_update_txn( txn, table="devices", keyvalues={"user_id": user_id, "device_id": device_id}, - values={ + updatevalues={ "user_agent": user_agent, "last_seen": last_seen, "ip": ip, }, - lock=False, ) except Exception as e: # Failed to upsert, log and continue diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py
index 38cd0ca9b8..e551606f9d 100644 --- a/synapse/storage/data_stores/main/end_to_end_keys.py +++ b/synapse/storage/data_stores/main/end_to_end_keys.py
@@ -14,15 +14,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, List + from six import iteritems from canonicaljson import encode_canonical_json, json +from twisted.enterprise.adbapi import Connection from twisted.internet import defer from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedList class EndToEndKeyWorkerStore(SQLBaseStore): @@ -271,7 +274,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): Args: txn (twisted.enterprise.adbapi.Connection): db connection user_id (str): the user whose key is being requested - key_type (str): the type of key that is being set: either 'master' + key_type (str): the type of key that is being requested: either 'master' for a master key, 'self_signing' for a self-signing key, or 'user_signing' for a user-signing key from_user_id (str): if specified, signatures made by this user on @@ -316,8 +319,10 @@ class EndToEndKeyWorkerStore(SQLBaseStore): """Returns a user's cross-signing key. Args: - user_id (str): the user whose self-signing key is being requested - key_type (str): the type of cross-signing key to get + user_id (str): the user whose key is being requested + key_type (str): the type of key that is being requested: either 'master' + for a master key, 'self_signing' for a self-signing key, or + 'user_signing' for a user-signing key from_user_id (str): if specified, signatures made by this user on the self-signing key will be included in the result @@ -332,6 +337,206 @@ class EndToEndKeyWorkerStore(SQLBaseStore): from_user_id, ) + @cached(num_args=1) + def _get_bare_e2e_cross_signing_keys(self, user_id): + """Dummy function. Only used to make a cache for + _get_bare_e2e_cross_signing_keys_bulk. + """ + raise NotImplementedError() + + @cachedList( + cached_method_name="_get_bare_e2e_cross_signing_keys", + list_name="user_ids", + num_args=1, + ) + def _get_bare_e2e_cross_signing_keys_bulk( + self, user_ids: List[str] + ) -> Dict[str, Dict[str, dict]]: + """Returns the cross-signing keys for a set of users. The output of this + function should be passed to _get_e2e_cross_signing_signatures_txn if + the signatures for the calling user need to be fetched. + + Args: + user_ids (list[str]): the users whose keys are being requested + + Returns: + dict[str, dict[str, dict]]: mapping from user ID to key type to key + data. If a user's cross-signing keys were not found, either + their user ID will not be in the dict, or their user ID will map + to None. + + """ + return self.db.runInteraction( + "get_bare_e2e_cross_signing_keys_bulk", + self._get_bare_e2e_cross_signing_keys_bulk_txn, + user_ids, + ) + + def _get_bare_e2e_cross_signing_keys_bulk_txn( + self, txn: Connection, user_ids: List[str], + ) -> Dict[str, Dict[str, dict]]: + """Returns the cross-signing keys for a set of users. The output of this + function should be passed to _get_e2e_cross_signing_signatures_txn if + the signatures for the calling user need to be fetched. + + Args: + txn (twisted.enterprise.adbapi.Connection): db connection + user_ids (list[str]): the users whose keys are being requested + + Returns: + dict[str, dict[str, dict]]: mapping from user ID to key type to key + data. 
If a user's cross-signing keys were not found, their user + ID will not be in the dict. + + """ + result = {} + + batch_size = 100 + chunks = [ + user_ids[i : i + batch_size] for i in range(0, len(user_ids), batch_size) + ] + for user_chunk in chunks: + sql = """ + SELECT k.user_id, k.keytype, k.keydata, k.stream_id + FROM e2e_cross_signing_keys k + INNER JOIN (SELECT user_id, keytype, MAX(stream_id) AS stream_id + FROM e2e_cross_signing_keys + GROUP BY user_id, keytype) s + USING (user_id, stream_id, keytype) + WHERE k.user_id IN (%s) + """ % ( + ",".join("?" for u in user_chunk), + ) + query_params = [] + query_params.extend(user_chunk) + + txn.execute(sql, query_params) + rows = self.db.cursor_to_dict(txn) + + for row in rows: + user_id = row["user_id"] + key_type = row["keytype"] + key = json.loads(row["keydata"]) + user_info = result.setdefault(user_id, {}) + user_info[key_type] = key + + return result + + def _get_e2e_cross_signing_signatures_txn( + self, txn: Connection, keys: Dict[str, Dict[str, dict]], from_user_id: str, + ) -> Dict[str, Dict[str, dict]]: + """Returns the cross-signing signatures made by a user on a set of keys. + + Args: + txn (twisted.enterprise.adbapi.Connection): db connection + keys (dict[str, dict[str, dict]]): a map of user ID to key type to + key data. This dict will be modified to add signatures. + from_user_id (str): fetch the signatures made by this user + + Returns: + dict[str, dict[str, dict]]: mapping from user ID to key type to key + data. The return value will be the same as the keys argument, + with the modifications included. + """ + + # find out what cross-signing keys (a.k.a. devices) we need to get + # signatures for. This is a map of (user_id, device_id) to key type + # (device_id is the key's public part). + devices = {} + + for user_id, user_info in keys.items(): + if user_info is None: + continue + for key_type, key in user_info.items(): + device_id = None + for k in key["keys"].values(): + device_id = k + devices[(user_id, device_id)] = key_type + + device_list = list(devices) + + # split into batches + batch_size = 100 + chunks = [ + device_list[i : i + batch_size] + for i in range(0, len(device_list), batch_size) + ] + for user_chunk in chunks: + sql = """ + SELECT target_user_id, target_device_id, key_id, signature + FROM e2e_cross_signing_signatures + WHERE user_id = ? + AND (%s) + """ % ( + " OR ".join( + "(target_user_id = ? AND target_device_id = ?)" for d in user_chunk + ) + ) + query_params = [from_user_id] + for item in user_chunk: + # item is a (user_id, device_id) tuple + query_params.extend(item) + + txn.execute(sql, query_params) + rows = self.db.cursor_to_dict(txn) + + # and add the signatures to the appropriate keys + for row in rows: + key_id = row["key_id"] + target_user_id = row["target_user_id"] + target_device_id = row["target_device_id"] + key_type = devices[(target_user_id, target_device_id)] + # We need to copy everything, because the result may have come + # from the cache. dict.copy only does a shallow copy, so we + # need to recursively copy the dicts that will be modified. 
+ user_info = keys[target_user_id] = keys[target_user_id].copy() + target_user_key = user_info[key_type] = user_info[key_type].copy() + if "signatures" in target_user_key: + signatures = target_user_key["signatures"] = target_user_key[ + "signatures" + ].copy() + if from_user_id in signatures: + user_sigs = signatures[from_user_id] = signatures[from_user_id].copy() + user_sigs[key_id] = row["signature"] + else: + signatures[from_user_id] = {key_id: row["signature"]} + else: + target_user_key["signatures"] = { + from_user_id: {key_id: row["signature"]} + } + + return keys + + @defer.inlineCallbacks + def get_e2e_cross_signing_keys_bulk( + self, user_ids: List[str], from_user_id: str = None + ) -> defer.Deferred: + """Returns the cross-signing keys for a set of users. + + Args: + user_ids (list[str]): the users whose keys are being requested + from_user_id (str): if specified, signatures made by this user on + the self-signing keys will be included in the result + + Returns: + Deferred[dict[str, dict[str, dict]]]: map of user ID to key type to + key data. If a user's cross-signing keys were not found, either + their user ID will not be in the dict, or their user ID will map + to None. + """ + + result = yield self._get_bare_e2e_cross_signing_keys_bulk(user_ids) + + if from_user_id: + result = yield self.db.runInteraction( + "get_e2e_cross_signing_signatures", + self._get_e2e_cross_signing_signatures_txn, + result, + from_user_id, + ) + + return result + def get_all_user_signature_changes_for_remotes(self, from_key, to_key): """Return a list of changes from the user signature stream to notify remotes. Note that the user signature stream represents when a user signs their @@ -520,6 +725,10 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): }, ) + self._invalidate_cache_and_stream( + txn, self._get_bare_e2e_cross_signing_keys, (user_id,) + ) + def set_e2e_cross_signing_key(self, user_id, key_type, key): """Set a user's cross-signing key. diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
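The dummy-@cached / @cachedList pairing used for _get_bare_e2e_cross_signing_keys is a general Synapse caching pattern: the @cached method exists only to own a per-key cache, the @cachedList method performs the batched fetch for whatever keys miss that cache, and invalidation (the _invalidate_cache_and_stream call above) targets the dummy method because that is where the cache lives. Schematically, with a hypothetical _get_things_txn query function rather than real store code:

    from synapse.storage._base import SQLBaseStore
    from synapse.util.caches.descriptors import cached, cachedList

    class ExampleWorkerStore(SQLBaseStore):
        @cached(num_args=1)
        def _get_thing(self, key):
            # Never called directly: exists purely so the decorator owns a
            # per-key cache for the bulk method below to read and populate.
            raise NotImplementedError()

        @cachedList(cached_method_name="_get_thing", list_name="keys", num_args=1)
        def _get_things_bulk(self, keys):
            # Invoked only for keys missing from _get_thing's cache; must
            # return a dict mapping each requested key to its value.
            return self.db.runInteraction("get_things_bulk", self._get_things_txn, keys)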
index 9ee117ce0f..2c9142814c 100644 --- a/synapse/storage/data_stores/main/events_worker.py +++ b/synapse/storage/data_stores/main/events_worker.py
@@ -19,8 +19,10 @@ import itertools import logging import threading from collections import namedtuple +from typing import List, Optional from canonicaljson import json +from constantly import NamedConstant, Names from twisted.internet import defer @@ -55,6 +57,16 @@ EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) +class EventRedactBehaviour(Names): + """ + What to do when retrieving a redacted event from the database. + """ + + AS_IS = NamedConstant() + REDACT = NamedConstant() + BLOCK = NamedConstant() + + class EventsWorkerStore(SQLBaseStore): def __init__(self, database: Database, db_conn, hs): super(EventsWorkerStore, self).__init__(database, db_conn, hs) @@ -125,25 +137,27 @@ class EventsWorkerStore(SQLBaseStore): @defer.inlineCallbacks def get_event( self, - event_id, - check_redacted=True, - get_prev_content=False, - allow_rejected=False, - allow_none=False, - check_room_id=None, + event_id: str, + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + get_prev_content: bool = False, + allow_rejected: bool = False, + allow_none: bool = False, + check_room_id: Optional[str] = None, ): """Get an event from the database by event_id. Args: - event_id (str): The event_id of the event to fetch - check_redacted (bool): If True, check if event has been redacted - and redact it. - get_prev_content (bool): If True and event is a state event, + event_id: The event_id of the event to fetch + redact_behaviour: Determine what to do with a redacted event. Possible values: + * AS_IS - Return the full event body with no redacted content + * REDACT - Return the event but with a redacted body + * BLOCK - Do not return redacted events + get_prev_content: If True and event is a state event, include the previous states content in the unsigned field. - allow_rejected (bool): If True return rejected events. - allow_none (bool): If True, return None if no event found, if + allow_rejected: If True return rejected events. + allow_none: If True, return None if no event found, if False throw a NotFoundError - check_room_id (str|None): if not None, check the room of the found event. + check_room_id: if not None, check the room of the found event. If there is a mismatch, behave as per allow_none. Returns: @@ -154,7 +168,7 @@ events = yield self.get_events_as_list( [event_id], - check_redacted=check_redacted, + redact_behaviour=redact_behaviour, get_prev_content=get_prev_content, allow_rejected=allow_rejected, ) @@ -173,27 +187,30 @@ @defer.inlineCallbacks def get_events( self, - event_ids, - check_redacted=True, - get_prev_content=False, - allow_rejected=False, + event_ids: List[str], + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + get_prev_content: bool = False, + allow_rejected: bool = False, ): """Get events from the database Args: - event_ids (list): The event_ids of the events to fetch - check_redacted (bool): If True, check if event has been redacted - and redact it. - get_prev_content (bool): If True and event is a state event, + event_ids: The event_ids of the events to fetch + redact_behaviour: Determine what to do with a redacted event. 
Possible + values: + * AS_IS - Return the full event body with no redacted content + * REDACT - Return the event but with a redacted body + * BLOCK - Do not return redacted events + get_prev_content: If True and event is a state event, include the previous states content in the unsigned field. - allow_rejected (bool): If True return rejected events. + allow_rejected: If True return rejected events. Returns: Deferred : Dict from event_id to event. """ events = yield self.get_events_as_list( event_ids, - check_redacted=check_redacted, + redact_behaviour=redact_behaviour, get_prev_content=get_prev_content, allow_rejected=allow_rejected, ) @@ -203,21 +220,23 @@ @defer.inlineCallbacks def get_events_as_list( self, - event_ids, - check_redacted=True, - get_prev_content=False, - allow_rejected=False, + event_ids: List[str], + redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT, + get_prev_content: bool = False, + allow_rejected: bool = False, ): """Get events from the database and return in a list in the same order as given by `event_ids` arg. Args: - event_ids (list): The event_ids of the events to fetch - check_redacted (bool): If True, check if event has been redacted - and redact it. - get_prev_content (bool): If True and event is a state event, + event_ids: The event_ids of the events to fetch + redact_behaviour: Determine what to do with a redacted event. Possible values: + * AS_IS - Return the full event body with no redacted content + * REDACT - Return the event but with a redacted body + * BLOCK - Do not return redacted events + get_prev_content: If True and event is a state event, include the previous states content in the unsigned field. - allow_rejected (bool): If True return rejected events. + allow_rejected: If True, return rejected events. Returns: Deferred[list[EventBase]]: List of events fetched from the database. The @@ -319,10 +338,14 @@ # Update the cache to save doing the checks again. entry.event.internal_metadata.recheck_redaction = False - if check_redacted and entry.redacted_event: - event = entry.redacted_event - else: - event = entry.event + event = entry.event + + if entry.redacted_event: + if redact_behaviour == EventRedactBehaviour.BLOCK: + # Skip this event + continue + elif redact_behaviour == EventRedactBehaviour.REDACT: + event = entry.redacted_event events.append(event) diff --git a/synapse/storage/data_stores/main/profile.py b/synapse/storage/data_stores/main/profile.py
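Callers now pick a redaction policy explicitly instead of passing the old check_redacted boolean. A usage sketch, where store stands for any EventsWorkerStore instance:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_unredacted(store, event_id):
        # REDACT is the default and returns the pruned body; BLOCK skips
        # redacted events entirely; AS_IS returns the original content, as
        # the state-resolution change earlier in this diff now requests.
        event = yield store.get_event(
            event_id,
            redact_behaviour=EventRedactBehaviour.AS_IS,
            allow_none=True,
        )
        defer.returnValue(event)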
index 2b52cf9c1a..3dc4451447 100644 --- a/synapse/storage/data_stores/main/profile.py +++ b/synapse/storage/data_stores/main/profile.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,9 +17,13 @@ from twisted.internet import defer from synapse.api.errors import StoreError + +from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore from synapse.storage.data_stores.main.roommember import ProfileInfo +BATCH_SIZE = 100 + class ProfileWorkerStore(SQLBaseStore): @defer.inlineCallbacks @@ -57,6 +62,54 @@ class ProfileWorkerStore(SQLBaseStore): desc="get_profile_avatar_url", ) + def get_latest_profile_replication_batch_number(self): + def f(txn): + txn.execute("SELECT MAX(batch) as maxbatch FROM profiles") + rows = self.db.cursor_to_dict(txn) + return rows[0]["maxbatch"] + + return self.db.runInteraction("get_latest_profile_replication_batch_number", f) + + def get_profile_batch(self, batchnum): + return self.db.simple_select_list( + table="profiles", + keyvalues={"batch": batchnum}, + retcols=("user_id", "displayname", "avatar_url", "active"), + desc="get_profile_batch", + ) + + def assign_profile_batch(self): + def f(txn): + sql = ( + "UPDATE profiles SET batch = " + "(SELECT COALESCE(MAX(batch), -1) + 1 FROM profiles) " + "WHERE user_id in (" + " SELECT user_id FROM profiles WHERE batch is NULL limit ?" + ")" + ) + txn.execute(sql, (BATCH_SIZE,)) + return txn.rowcount + + return self.db.runInteraction("assign_profile_batch", f) + + def get_replication_hosts(self): + def f(txn): + txn.execute( + "SELECT host, last_synced_batch FROM profile_replication_status" + ) + rows = self.db.cursor_to_dict(txn) + return {r["host"]: r["last_synced_batch"] for r in rows} + + return self.db.runInteraction("get_replication_hosts", f) + + def update_replication_batch_for_host(self, host, last_synced_batch): + return self.db.simple_upsert( + table="profile_replication_status", + keyvalues={"host": host}, + values={"last_synced_batch": last_synced_batch}, + desc="update_replication_batch_for_host", + ) + def get_from_remote_profile_cache(self, user_id): return self.db.simple_select_one( table="remote_profile_cache", @@ -71,24 +124,53 @@ class ProfileWorkerStore(SQLBaseStore): table="profiles", values={"user_id": user_localpart}, desc="create_profile" ) - def set_profile_displayname(self, user_localpart, new_displayname): - return self.db.simple_update_one( + def set_profile_displayname(self, user_localpart, new_displayname, batchnum): + return self.db.simple_upsert( table="profiles", keyvalues={"user_id": user_localpart}, - updatevalues={"displayname": new_displayname}, + values={"displayname": new_displayname, "batch": batchnum}, desc="set_profile_displayname", + lock=False, # we can do this because user_id has a unique index ) - def set_profile_avatar_url(self, user_localpart, new_avatar_url): - return self.db.simple_update_one( + def set_profile_avatar_url(self, user_localpart, new_avatar_url, batchnum): + return self.db.simple_upsert( table="profiles", keyvalues={"user_id": user_localpart}, - updatevalues={"avatar_url": new_avatar_url}, + values={"avatar_url": new_avatar_url, "batch": batchnum}, desc="set_profile_avatar_url", + lock=False, # we can do this because user_id has a unique index + ) + + def set_profile_active(self, user_localpart, active, hide, batchnum): + values = {"active": int(active), "batch": batchnum} + if not active and not hide: + # we are deactivating for real (not in hide mode) + # so 
clear the profile. + values["avatar_url"] = None + values["displayname"] = None + return self.db.simple_upsert( + table="profiles", + keyvalues={"user_id": user_localpart}, + values=values, + desc="set_profile_active", + lock=False, # we can do this because user_id has a unique index ) class ProfileStore(ProfileWorkerStore): + def __init__(self, database, db_conn, hs): + + super(ProfileStore, self).__init__(database, db_conn, hs) + + self.db.updates.register_background_index_update( + "profile_replication_status_host_index", + index_name="profile_replication_status_idx", + table="profile_replication_status", + columns=["host"], + unique=True, + ) + def add_remote_profile_cache(self, user_id, displayname, avatar_url): """Ensure we are caching the remote user's profiles. @@ -107,7 +189,7 @@ class ProfileStore(ProfileWorkerStore): ) def update_remote_profile_cache(self, user_id, displayname, avatar_url): - return self.db.simple_update( + return self.db.simple_upsert( table="remote_profile_cache", keyvalues={"user_id": user_id}, values={ diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
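Taken together, the store methods above form a pull model for profile replication: stamp dirty rows with `assign_profile_batch()`, then walk each host forward from its `last_synced_batch`. A minimal sketch of a consumer, assuming a hypothetical `push_batch_to_host` callable (the actual pusher wiring is not part of this diff):

```
from twisted.internet import defer


@defer.inlineCallbacks
def replicate_profiles(store, push_batch_to_host):
    """Sketch of one replication pass over the store methods above.

    `push_batch_to_host` is a hypothetical callable that sends one batch
    of profile rows to a remote host; it is not defined in this diff.
    """
    # Stamp up to BATCH_SIZE unbatched rows with the next batch number.
    yield store.assign_profile_batch()

    latest = yield store.get_latest_profile_replication_batch_number()
    if latest is None:
        defer.returnValue(None)  # no profiles have ever been batched

    hosts = yield store.get_replication_hosts()
    for host, last_synced in hosts.items():
        # Replay every batch this host has not yet seen, in order.
        for batchnum in range(last_synced + 1, latest + 1):
            rows = yield store.get_profile_batch(batchnum)
            yield push_batch_to_host(host, rows)
            yield store.update_replication_batch_for_host(host, batchnum)
```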
index 5e8ecac0ea..6f0e949a84 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py
@@ -156,6 +156,28 @@ class RegistrationWorkerStore(SQLBaseStore): ) @defer.inlineCallbacks + def get_expired_users(self): + """Get IDs of all expired users + + Returns: + Deferred[list[str]]: List of expired user IDs + """ + + def get_expired_users_txn(txn, now_ms): + sql = """ + SELECT user_id from account_validity + WHERE expiration_ts_ms <= ? + """ + txn.execute(sql, (now_ms,)) + rows = txn.fetchall() + return [row[0] for row in rows] + + res = yield self.db.runInteraction( + "get_expired_users", get_expired_users_txn, self.clock.time_msec() + ) + defer.returnValue(res) + + @defer.inlineCallbacks def set_renewal_token_for_user(self, user_id, renewal_token): """Defines a renewal token for a given user. diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
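`get_expired_users` pairs naturally with a periodic sweep; a rough sketch of such a caller (the deactivation callable is hypothetical, standing in for the real handler, which is outside this diff):

```
from twisted.internet import defer


@defer.inlineCallbacks
def sweep_expired_users(store, deactivate_account):
    """Deactivate every account whose account_validity period has lapsed."""
    expired_user_ids = yield store.get_expired_users()
    for user_id in expired_user_ids:
        # `deactivate_account` is an assumed callable, not part of this diff.
        yield deactivate_account(user_id)
    defer.returnValue(len(expired_user_ids))
```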
index aa476d0fbf..1ff26aae4e 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py
@@ -280,6 +280,24 @@ class RoomWorkerStore(SQLBaseStore):
             desc="is_room_blocked",
         )
 
+    @defer.inlineCallbacks
+    def is_room_published(self, room_id):
+        """Check whether a room has been published in the local public room
+        directory.
+
+        Args:
+            room_id (str)
+        Returns:
+            Deferred[bool]: Whether the room is currently published in the room directory
+        """
+        # Get room information
+        room_info = yield self.get_room(room_id)
+        if not room_info:
+            defer.returnValue(False)
+
+        # Check the is_public value
+        defer.returnValue(room_info.get("is_public", False))
+
     @cachedInlineCallbacks(max_entries=10000)
     def get_ratelimit_for_user(self, user_id):
         """Check if there are any overrides for ratelimiting for the given
@@ -324,6 +342,11 @@ class RoomWorkerStore(SQLBaseStore):
         Returns:
             dict[int, int]: "min_lifetime" and "max_lifetime" for this room.
         """
+        # If the room retention feature is disabled, return a policy with no minimum nor
+        # maximum, so that no events are filtered out when they are sent to the client.
+        if not self.config.retention_enabled:
+            defer.returnValue({"min_lifetime": None, "max_lifetime": None})
 
         def get_retention_policy_for_room_txn(txn):
             txn.execute(
diff --git a/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql b/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql
new file mode 100644
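The early return above hands callers a policy whose bounds are both `None`; a sketch of how a caller might interpret that (illustrative only, not code from this diff):

```
from twisted.internet import defer


@defer.inlineCallbacks
def event_is_expired(store, room_id, event_age_ms):
    # Illustrative caller: with retention disabled, the early return above
    # yields max_lifetime = None, so no event is ever treated as expired.
    policy = yield store.get_retention_policy_for_room(room_id)
    max_lifetime = policy["max_lifetime"]
    defer.returnValue(max_lifetime is not None and event_age_ms > max_lifetime)
```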
index 0000000000..4f24c1405d --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/56/nuke_empty_communities_from_db.sql
@@ -0,0 +1,29 @@ +/* Copyright 2019 Werner Sembach + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Groups/communities now get deleted when the last member leaves. This is a one time cleanup to remove old groups/communities that were already empty before that change was made. +DELETE FROM group_attestations_remote WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_attestations_renewals WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_invites WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_roles WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_room_categories WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_rooms WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_summary_roles WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_summary_room_categories WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_summary_rooms WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM group_summary_users WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM local_group_membership WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM local_group_updates WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); +DELETE FROM groups WHERE group_id IN (SELECT group_id FROM groups WHERE NOT EXISTS (SELECT group_id FROM group_users WHERE group_id = groups.group_id)); diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
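The thirteen DELETE statements all share a single subquery selecting groups with no rows in `group_users`; a small Python sketch (illustrative only, the migration itself stays plain SQL) that would generate them from a table list:

```
EMPTY_GROUPS = (
    "(SELECT group_id FROM groups WHERE NOT EXISTS "
    "(SELECT group_id FROM group_users WHERE group_id = groups.group_id))"
)

# The dependent tables first, then groups itself last.
GROUP_TABLES = [
    "group_attestations_remote",
    "group_attestations_renewals",
    "group_invites",
    "group_roles",
    "group_room_categories",
    "group_rooms",
    "group_summary_roles",
    "group_summary_room_categories",
    "group_summary_rooms",
    "group_summary_users",
    "local_group_membership",
    "local_group_updates",
    "groups",
]


def cleanup_statements():
    for table in GROUP_TABLES:
        yield "DELETE FROM %s WHERE group_id IN %s;" % (table, EMPTY_GROUPS)
```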
index 4ad2929f32..023f493f7d 100644 --- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres +++ b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
@@ -658,10 +658,19 @@ CREATE TABLE presence_stream ( +CREATE TABLE profile_replication_status ( + host text NOT NULL, + last_synced_batch bigint NOT NULL +); + + + CREATE TABLE profiles ( user_id text NOT NULL, displayname text, - avatar_url text + avatar_url text, + batch bigint, + active smallint DEFAULT 1 NOT NULL ); @@ -1828,6 +1837,10 @@ CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id); +CREATE INDEX profiles_batch_idx ON profiles USING btree (batch); + + + CREATE INDEX public_room_index ON rooms USING btree (is_public); diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
index bad33291e7..b3112c24a9 100644 --- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite +++ b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
@@ -6,7 +6,7 @@ CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_us CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) ); CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) ); CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL ); -CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) ); +CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, batch BIGINT DEFAULT NULL, active SMALLINT DEFAULT 1 NOT NULL, UNIQUE(user_id) ); CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) ); CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER ); CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) ); @@ -207,6 +207,8 @@ CREATE INDEX group_users_u_idx ON group_users(user_id); CREATE INDEX group_invites_u_idx ON group_invites(user_id); CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id); CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); +CREATE INDEX profiles_batch_idx ON profiles(batch); +CREATE TABLE profile_replication_status ( host TEXT NOT NULL, last_synced_batch BIGINT NOT NULL ); CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL ); CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp); CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp); diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.md b/synapse/storage/data_stores/main/schema/full_schemas/README.md new file mode 100644
index 0000000000..bbd3f18604 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/full_schemas/README.md
@@ -0,0 +1,13 @@
+# Building full schema dumps
+
+These schemas need to be made from a database that has had all background updates run.
+
+To do so, use `scripts-dev/make_full_schema.sh`. This will produce
+`full.sql.postgres` and `full.sql.sqlite` files.
+
+Ensure Postgres is installed and your user has the ability to run bash commands
+such as `createdb`.
+
+```
+./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/
+```
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/README.txt b/synapse/storage/data_stores/main/schema/full_schemas/README.txt
deleted file mode 100644
index d3f6401344..0000000000 --- a/synapse/storage/data_stores/main/schema/full_schemas/README.txt +++ /dev/null
@@ -1,19 +0,0 @@ -Building full schema dumps -========================== - -These schemas need to be made from a database that has had all background updates run. - -Postgres --------- - -$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres - -SQLite ------- - -$ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite - -After ------ - -Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file diff --git a/synapse/storage/data_stores/main/search.py b/synapse/storage/data_stores/main/search.py
index 4eec2fae5e..47ebb8a214 100644 --- a/synapse/storage/data_stores/main/search.py +++ b/synapse/storage/data_stores/main/search.py
@@ -25,6 +25,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause +from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour from synapse.storage.database import Database from synapse.storage.engines import PostgresEngine, Sqlite3Engine @@ -384,7 +385,7 @@ class SearchStore(SearchBackgroundUpdateStore): """ clauses = [] - search_query = search_query = _parse_query(self.database_engine, search_term) + search_query = _parse_query(self.database_engine, search_term) args = [] @@ -453,7 +454,12 @@ class SearchStore(SearchBackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - events = yield self.get_events_as_list([r["event_id"] for r in results]) + # We set redact_behaviour to BLOCK here to prevent redacted events being returned in + # search results (which is a data leak) + events = yield self.get_events_as_list( + [r["event_id"] for r in results], + redact_behaviour=EventRedactBehaviour.BLOCK, + ) event_map = {ev.event_id: ev for ev in events} @@ -495,7 +501,7 @@ class SearchStore(SearchBackgroundUpdateStore): """ clauses = [] - search_query = search_query = _parse_query(self.database_engine, search_term) + search_query = _parse_query(self.database_engine, search_term) args = [] @@ -600,7 +606,12 @@ class SearchStore(SearchBackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - events = yield self.get_events_as_list([r["event_id"] for r in results]) + # We set redact_behaviour to BLOCK here to prevent redacted events being returned in + # search results (which is a data leak) + events = yield self.get_events_as_list( + [r["event_id"] for r in results], + redact_behaviour=EventRedactBehaviour.BLOCK, + ) event_map = {ev.event_id: ev for ev in events} diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
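For context, `EventRedactBehaviour` (as defined in `events_worker`) distinguishes returning the original content, the pruned content, or nothing at all; a sketch of the call pattern the two hunks above adopt (member names other than BLOCK are recalled from the surrounding codebase, not shown in this diff):

```
from twisted.internet import defer

from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour


@defer.inlineCallbacks
def fetch_searchable_events(store, event_ids):
    # BLOCK omits redacted events from the result entirely, so their
    # pre-redaction content can never surface in search results; REDACT
    # would return the pruned events, and AS_IS the original content.
    events = yield store.get_events_as_list(
        event_ids, redact_behaviour=EventRedactBehaviour.BLOCK
    )
    defer.returnValue({ev.event_id: ev for ev in events})
```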
index 9ef7b48c74..dcc6b43cdf 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py
@@ -278,7 +278,7 @@ class StateGroupWorkerStore( @defer.inlineCallbacks def get_room_predecessor(self, room_id): - """Get the predecessor room of an upgraded room if one exists. + """Get the predecessor of an upgraded room if it exists. Otherwise return None. Args: @@ -291,14 +291,22 @@ class StateGroupWorkerStore( * room_id (str): The room ID of the predecessor room * event_id (str): The ID of the tombstone event in the predecessor room + None if a predecessor key is not found, or is not a dictionary. + Raises: - NotFoundError if the room is unknown + NotFoundError if the given room is unknown """ # Retrieve the room's create event create_event = yield self.get_create_event_for_room(room_id) - # Return predecessor if present - return create_event.content.get("predecessor", None) + # Retrieve the predecessor key of the create event + predecessor = create_event.content.get("predecessor", None) + + # Ensure the key is a dictionary + if not isinstance(predecessor, dict): + return None + + return predecessor @defer.inlineCallbacks def get_create_event_for_room(self, room_id): @@ -318,7 +326,7 @@ class StateGroupWorkerStore( # If we can't find the create event, assume we've hit a dead end if not create_id: - raise NotFoundError("Unknown room %s" % (room_id)) + raise NotFoundError("Unknown room %s" % (room_id,)) # Retrieve the room's create event and return create_event = yield self.get_event(create_id) diff --git a/synapse/storage/schema/delta/48/profiles_batch.sql b/synapse/storage/schema/delta/48/profiles_batch.sql new file mode 100644
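The new `isinstance` guard means callers of `get_room_predecessor` only ever see a dict or `None`; a standalone illustration of the guard's effect (all values made up):

```
def extract_predecessor(create_content):
    # Same guard as above, reproduced for illustration.
    predecessor = create_content.get("predecessor", None)
    if not isinstance(predecessor, dict):
        return None
    return predecessor


# A well-formed create event passes through unchanged:
good = {"predecessor": {"room_id": "!old:example.com", "event_id": "$tomb"}}
assert extract_predecessor(good) == good["predecessor"]

# A malformed or absent predecessor now yields None instead of junk:
assert extract_predecessor({"predecessor": "!old:example.com"}) is None
assert extract_predecessor({}) is None
```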
index 0000000000..e744c02fe8 --- /dev/null +++ b/synapse/storage/schema/delta/48/profiles_batch.sql
@@ -0,0 +1,36 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Add a batch number to track changes to profiles and the + * order they're made in so we can replicate user profiles + * to other hosts as they change + */ +ALTER TABLE profiles ADD COLUMN batch BIGINT DEFAULT NULL; + +/* + * Index on the batch number so we can get profiles + * by their batch + */ +CREATE INDEX profiles_batch_idx ON profiles(batch); + +/* + * A table to track what batch of user profiles has been + * synced to what profile replication target. + */ +CREATE TABLE profile_replication_status ( + host TEXT NOT NULL, + last_synced_batch BIGINT NOT NULL +); diff --git a/synapse/storage/schema/delta/50/profiles_deactivated_users.sql b/synapse/storage/schema/delta/50/profiles_deactivated_users.sql new file mode 100644
index 0000000000..c8893ecbe8 --- /dev/null +++ b/synapse/storage/schema/delta/50/profiles_deactivated_users.sql
@@ -0,0 +1,23 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * A flag saying whether the user owning the profile has been deactivated.
+ * This really belongs on the users table, not here, but the users table
+ * stores users by their full user_id and profiles stores them by localpart,
+ * so we can't easily join between the two tables. Plus, the batch number
+ * really ought to represent data in this table that has changed.
+ */
+ALTER TABLE profiles ADD COLUMN active SMALLINT DEFAULT 1 NOT NULL;
diff --git a/synapse/storage/schema/delta/55/profile_replication_status_index.sql b/synapse/storage/schema/delta/55/profile_replication_status_index.sql
new file mode 100644
index 0000000000..18a0f7e10c --- /dev/null +++ b/synapse/storage/schema/delta/55/profile_replication_status_index.sql
@@ -0,0 +1,17 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('profile_replication_status_host_index', '{}'); diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql b/synapse/storage/schema/delta/55/room_retention.sql
index ee6cdf7a14..ee6cdf7a14 100644 --- a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql +++ b/synapse/storage/schema/delta/55/room_retention.sql
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index b91fb2db7b..fcd2aaa9c9 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py
@@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, Dict + from twisted.internet import defer from synapse.handlers.account_data import AccountDataEventSource @@ -35,7 +37,7 @@ class EventSources(object): def __init__(self, hs): self.sources = { name: cls(hs) for name, cls in EventSources.SOURCE_TYPES.items() - } + } # type: Dict[str, Any] self.store = hs.get_datastore() @defer.inlineCallbacks diff --git a/synapse/third_party_rules/access_rules.py b/synapse/third_party_rules/access_rules.py new file mode 100644
index 0000000000..253bba664b --- /dev/null +++ b/synapse/third_party_rules/access_rules.py
@@ -0,0 +1,586 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import email.utils
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, Membership, RoomCreationPreset
+from synapse.api.errors import SynapseError
+from synapse.config._base import ConfigError
+from synapse.types import get_domain_from_id
+
+ACCESS_RULES_TYPE = "im.vector.room.access_rules"
+ACCESS_RULE_RESTRICTED = "restricted"
+ACCESS_RULE_UNRESTRICTED = "unrestricted"
+ACCESS_RULE_DIRECT = "direct"
+
+VALID_ACCESS_RULES = (
+    ACCESS_RULE_DIRECT,
+    ACCESS_RULE_RESTRICTED,
+    ACCESS_RULE_UNRESTRICTED,
+)
+
+# Rules to which we need to apply the power levels restrictions.
+#
+# These are all of the rules that neither:
+#  * forbid users from joining based on a server blacklist (which means that there
+#    is no need to apply power level restrictions), nor
+#  * target direct chats (since we allow both users to be room admins in this case).
+#
+# The power-level restrictions, when they are applied, prevent the following:
+#  * the default power level for users (users_default) being set to anything other than 0.
+#  * a non-default power level being assigned to any user who would be forbidden from
+#    joining a restricted room.
+RULES_WITH_RESTRICTED_POWER_LEVELS = (ACCESS_RULE_UNRESTRICTED,)
+
+
+class RoomAccessRules(object):
+    """Implementation of the ThirdPartyEventRules module API that allows federation admins
+    to define custom rules for specific events and actions.
+    Implements the custom behaviour for the "im.vector.room.access_rules" state event.
+
+    Takes a config in the format:
+
+    third_party_event_rules:
+        module: third_party_rules.RoomAccessRules
+        config:
+            # List of domains (server names) that can't be invited to rooms if the
+            # "restricted" rule is set. Defaults to an empty list.
+            domains_forbidden_when_restricted: []
+
+            # Identity server to use when checking the HS an email address belongs to
+            # using the /info endpoint. Required.
+            id_server: "vector.im"
+
+    Don't forget to consider whether users from your own domain can be invited.
+    """
+
+    def __init__(self, config, http_client):
+        self.http_client = http_client
+
+        self.id_server = config["id_server"]
+
+        self.domains_forbidden_when_restricted = config.get(
+            "domains_forbidden_when_restricted", []
+        )
+
+    @staticmethod
+    def parse_config(config):
+        if "id_server" in config:
+            return config
+        else:
+            raise ConfigError("No IS for event rules TchapEventRules")
+
+    def on_create_room(self, requester, config, is_requester_admin):
+        """Implements synapse.events.ThirdPartyEventRules.on_create_room
+
+        Checks if an im.vector.room.access_rules event is being set during room creation.
+        If yes, make sure the event is correct. Otherwise, append an event with the
+        default rule to the initial state.
+ """ + is_direct = config.get("is_direct") + preset = config.get("preset") + access_rule = None + join_rule = None + + # If there's a rules event in the initial state, check if it complies with the + # spec for im.vector.room.access_rules and deny the request if not. + for event in config.get("initial_state", []): + if event["type"] == ACCESS_RULES_TYPE: + access_rule = event["content"].get("rule") + + # Make sure the event has a valid content. + if access_rule is None: + raise SynapseError(400, "Invalid access rule") + + # Make sure the rule name is valid. + if access_rule not in VALID_ACCESS_RULES: + raise SynapseError(400, "Invalid access rule") + + # Make sure the rule is "direct" if the room is a direct chat. + if (is_direct and access_rule != ACCESS_RULE_DIRECT) or ( + access_rule == ACCESS_RULE_DIRECT and not is_direct + ): + raise SynapseError(400, "Invalid access rule") + + if event["type"] == EventTypes.JoinRules: + join_rule = event["content"].get("join_rule") + + if access_rule is None: + # If there's no access rules event in the initial state, create one with the + # default setting. + if is_direct: + default_rule = ACCESS_RULE_DIRECT + else: + # If the default value for non-direct chat changes, we should make another + # case here for rooms created with either a "public" join_rule or the + # "public_chat" preset to make sure those keep defaulting to "restricted" + default_rule = ACCESS_RULE_RESTRICTED + + if not config.get("initial_state"): + config["initial_state"] = [] + + config["initial_state"].append( + { + "type": ACCESS_RULES_TYPE, + "state_key": "", + "content": {"rule": default_rule}, + } + ) + + access_rule = default_rule + + # Check that the preset or the join rule in use is compatible with the access + # rule, whether it's a user-defined one or the default one (i.e. if it involves + # a "public" join rule, the access rule must be "restricted"). + if ( + join_rule == JoinRules.PUBLIC or preset == RoomCreationPreset.PUBLIC_CHAT + ) and access_rule != ACCESS_RULE_RESTRICTED: + raise SynapseError(400, "Invalid access rule") + + # Check if the creator can override values for the power levels. + allowed = self._is_power_level_content_allowed( + config.get("power_level_content_override", {}), access_rule + ) + if not allowed: + raise SynapseError(400, "Invalid power levels content override") + + # Second loop for events we need to know the current rule to process. + for event in config.get("initial_state", []): + if event["type"] == EventTypes.PowerLevels: + allowed = self._is_power_level_content_allowed( + event["content"], access_rule + ) + if not allowed: + raise SynapseError(400, "Invalid power levels content") + + @defer.inlineCallbacks + def check_threepid_can_be_invited(self, medium, address, state_events): + """Implements synapse.events.ThirdPartyEventRules.check_threepid_can_be_invited + + Check if a threepid can be invited to the room via a 3PID invite given the current + rules and the threepid's address, by retrieving the HS it's mapped to from the + configured identity server, and checking if we can invite users from it. + """ + rule = self._get_rule_from_state(state_events) + + if medium != "email": + defer.returnValue(False) + + if rule != ACCESS_RULE_RESTRICTED: + # Only "restricted" requires filtering 3PID invites. We don't need to do + # anything for "direct" here, because only "restricted" requires filtering + # based on the HS the address is mapped to. 
+ defer.returnValue(True) + + parsed_address = email.utils.parseaddr(address)[1] + if parsed_address != address: + # Avoid reproducing the security issue described here: + # https://matrix.org/blog/2019/04/18/security-update-sydent-1-0-2 + # It's probably not worth it but let's just be overly safe here. + defer.returnValue(False) + + # Get the HS this address belongs to from the identity server. + res = yield self.http_client.get_json( + "https://%s/_matrix/identity/api/v1/info" % (self.id_server,), + {"medium": medium, "address": address}, + ) + + # Look for a domain that's not forbidden from being invited. + if not res.get("hs"): + defer.returnValue(False) + if res.get("hs") in self.domains_forbidden_when_restricted: + defer.returnValue(False) + + defer.returnValue(True) + + def check_event_allowed(self, event, state_events): + """Implements synapse.events.ThirdPartyEventRules.check_event_allowed + + Checks the event's type and the current rule and calls the right function to + determine whether the event can be allowed. + """ + if event.type == ACCESS_RULES_TYPE: + return self._on_rules_change(event, state_events) + + # We need to know the rule to apply when processing the event types below. + rule = self._get_rule_from_state(state_events) + + if event.type == EventTypes.PowerLevels: + return self._is_power_level_content_allowed(event.content, rule) + + if event.type == EventTypes.Member or event.type == EventTypes.ThirdPartyInvite: + return self._on_membership_or_invite(event, rule, state_events) + + if event.type == EventTypes.JoinRules: + return self._on_join_rule_change(event, rule) + + if event.type == EventTypes.RoomAvatar: + return self._on_room_avatar_change(event, rule) + + if event.type == EventTypes.Name: + return self._on_room_name_change(event, rule) + + if event.type == EventTypes.Topic: + return self._on_room_topic_change(event, rule) + + return True + + def _on_rules_change(self, event, state_events): + """Implement the checks and behaviour specified on allowing or forbidding a new + im.vector.room.access_rules event. + + Args: + event (synapse.events.EventBase): The event to check. + state_events (dict[tuple[event type, state key], EventBase]): The state of the + room before the event was sent. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + new_rule = event.content.get("rule") + + # Check for invalid values. + if new_rule not in VALID_ACCESS_RULES: + return False + + # We must not allow rooms with the "public" join rule to be given any other access + # rule than "restricted". + join_rule = self._get_join_rule_from_state(state_events) + if join_rule == JoinRules.PUBLIC and new_rule != ACCESS_RULE_RESTRICTED: + return False + + # Make sure we don't apply "direct" if the room has more than two members. + if new_rule == ACCESS_RULE_DIRECT: + existing_members, threepid_tokens = self._get_members_and_tokens_from_state( + state_events + ) + + if len(existing_members) > 2 or len(threepid_tokens) > 1: + return False + + prev_rules_event = state_events.get((ACCESS_RULES_TYPE, "")) + + # Now that we know the new rule doesn't break the "direct" case, we can allow any + # new rule in rooms that had none before. + if prev_rules_event is None: + return True + + prev_rule = prev_rules_event.content.get("rule") + + # Currently, we can only go from "restricted" to "unrestricted". 
+ if prev_rule == ACCESS_RULE_RESTRICTED and new_rule == ACCESS_RULE_UNRESTRICTED: + return True + + return False + + def _on_membership_or_invite(self, event, rule, state_events): + """Applies the correct rule for incoming m.room.member and + m.room.third_party_invite events. + + Args: + event (synapse.events.EventBase): The event to check. + rule (str): The name of the rule to apply. + state_events (dict[tuple[event type, state key], EventBase]): The state of the + room before the event was sent. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + if rule == ACCESS_RULE_RESTRICTED: + ret = self._on_membership_or_invite_restricted(event) + elif rule == ACCESS_RULE_UNRESTRICTED: + ret = self._on_membership_or_invite_unrestricted() + elif rule == ACCESS_RULE_DIRECT: + ret = self._on_membership_or_invite_direct(event, state_events) + else: + # We currently apply the default (restricted) if we don't know the rule, we + # might want to change that in the future. + ret = self._on_membership_or_invite_restricted(event) + + return ret + + def _on_membership_or_invite_restricted(self, event): + """Implements the checks and behaviour specified for the "restricted" rule. + + "restricted" currently means that users can only invite users if their server is + included in a limited list of domains. + + Args: + event (synapse.events.EventBase): The event to check. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + # We're not applying the rules on m.room.third_party_member events here because + # the filtering on threepids is done in check_threepid_can_be_invited, which is + # called before check_event_allowed. + if event.type == EventTypes.ThirdPartyInvite: + return True + + # We only need to process "join" and "invite" memberships, in order to be backward + # compatible, e.g. if a user from a blacklisted server joined a restricted room + # before the rules started being enforced on the server, that user must be able to + # leave it. + if event.membership not in [Membership.JOIN, Membership.INVITE]: + return True + + invitee_domain = get_domain_from_id(event.state_key) + return invitee_domain not in self.domains_forbidden_when_restricted + + def _on_membership_or_invite_unrestricted(self): + """Implements the checks and behaviour specified for the "unrestricted" rule. + + "unrestricted" currently means that every event is allowed. + + Returns: + bool, True if the event can be allowed, False otherwise. + """ + return True + + def _on_membership_or_invite_direct(self, event, state_events): + """Implements the checks and behaviour specified for the "direct" rule. + + "direct" currently means that no member is allowed apart from the two initial + members the room was created for (i.e. the room's creator and their first + invitee). + + Args: + event (synapse.events.EventBase): The event to check. + state_events (dict[tuple[event type, state key], EventBase]): The state of the + room before the event was sent. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + # Get the room memberships and 3PID invite tokens from the room's state. 
+        existing_members, threepid_tokens = self._get_members_and_tokens_from_state(
+            state_events
+        )
+
+        # There should never be more than one 3PID invite in the room state: if the second
+        # original user came and left, and we're inviting them using their email address,
+        # given we know they have a Matrix account bound to the address (so they could
+        # join the first time), Synapse will successfully look it up before attempting to
+        # store an invite on the IS.
+        if len(threepid_tokens) == 1 and event.type == EventTypes.ThirdPartyInvite:
+            # If we already have a 3PID invite in flight, don't accept another one, unless
+            # the new one has the same invite token as its state key. This is because 3PID
+            # invite revocations must be allowed, and a revocation is basically a new 3PID
+            # invite event with an empty content and the same token as the invite it
+            # revokes.
+            return event.state_key in threepid_tokens
+
+        if len(existing_members) == 2:
+            # If the user was one of the two initial users of the room, Synapse would have
+            # looked them up successfully and thus sent an m.room.member here instead of
+            # m.room.third_party_invite.
+            if event.type == EventTypes.ThirdPartyInvite:
+                return False
+
+            # We can only have m.room.member events here. The rule in this case is to only
+            # allow the event if its target is one of the initial two members in the room,
+            # i.e. the state key of one of the two m.room.member states in the room.
+            return event.state_key in existing_members
+
+        # We're alone in the room (and always have been) and there's one 3PID invite in
+        # flight.
+        if len(existing_members) == 1 and len(threepid_tokens) == 1:
+            # We can only have m.room.member events here. In this case, we can only allow
+            # the event if it's either an m.room.member from the joined user (we can assume
+            # that the only m.room.member event is a join otherwise we wouldn't be able to
+            # send an event to the room) or an invite event whose target is the invited
+            # user.
+            target = event.state_key
+            is_from_threepid_invite = self._is_invite_from_threepid(
+                event, threepid_tokens[0]
+            )
+            if is_from_threepid_invite or target == existing_members[0]:
+                return True
+
+            return False
+
+        return True
+
+    def _is_power_level_content_allowed(self, content, access_rule):
+        """Check if a given power levels event is permitted under the given access rule.
+
+        It shouldn't be allowed if it either changes the default PL to a non-0 value or
+        gives a non-0 PL to a user that would have been forbidden from joining the room
+        under a more restrictive access rule.
+
+        Args:
+            content (dict): The content of the m.room.power_levels event to check.
+            access_rule (str): The access rule in place in this room.
+        Returns:
+            bool, True if the event can be allowed, False otherwise.
+        """
+        # Check if we need to apply the restrictions with the current rule.
+        if access_rule not in RULES_WITH_RESTRICTED_POWER_LEVELS:
+            return True
+
+        # If users_default is explicitly set to a non-0 value, deny the event.
+        users_default = content.get("users_default", 0)
+        if users_default:
+            return False
+
+        users = content.get("users", {})
+        for user_id, power_level in users.items():
+            server_name = get_domain_from_id(user_id)
+            # Check the domain against the blacklist. If found, and the PL isn't 0, deny
+            # the event.
+            if (
+                server_name in self.domains_forbidden_when_restricted
+                and power_level != 0
+            ):
+                return False
+
+        return True
+
+    def _on_join_rule_change(self, event, rule):
+        """Check whether a join rule change is allowed.
A join rule change is always + allowed unless the new join rule is "public" and the current access rule isn't + "restricted". + The rationale is that external users (those whose server would be denied access + to rooms enforcing the "restricted" access rule) should always rely on non- + external users for access to rooms, therefore they shouldn't be able to access + rooms that don't require an invite to be joined. + + Note that we currently rely on the default access rule being "restricted": during + room creation, the m.room.join_rules event will be sent *before* the + im.vector.room.access_rules one, so the access rule that will be considered here + in this case will be the default "restricted" one. This is fine since the + "restricted" access rule allows any value for the join rule, but we should keep + that in mind if we need to change the default access rule in the future. + + Args: + event (synapse.events.EventBase): The event to check. + rule (str): The name of the rule to apply. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + if event.content.get("join_rule") == JoinRules.PUBLIC: + return rule == ACCESS_RULE_RESTRICTED + + return True + + def _on_room_avatar_change(self, event, rule): + """Check whether a change of room avatar is allowed. + The current rule is to forbid such a change in direct chats but allow it + everywhere else. + + Args: + event (synapse.events.EventBase): The event to check. + rule (str): The name of the rule to apply. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + return rule != ACCESS_RULE_DIRECT + + def _on_room_name_change(self, event, rule): + """Check whether a change of room name is allowed. + The current rule is to forbid such a change in direct chats but allow it + everywhere else. + + Args: + event (synapse.events.EventBase): The event to check. + rule (str): The name of the rule to apply. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + return rule != ACCESS_RULE_DIRECT + + def _on_room_topic_change(self, event, rule): + """Check whether a change of room topic is allowed. + The current rule is to forbid such a change in direct chats but allow it + everywhere else. + + Args: + event (synapse.events.EventBase): The event to check. + rule (str): The name of the rule to apply. + Returns: + bool, True if the event can be allowed, False otherwise. + """ + return rule != ACCESS_RULE_DIRECT + + @staticmethod + def _get_rule_from_state(state_events): + """Extract the rule to be applied from the given set of state events. + + Args: + state_events (dict[tuple[event type, state key], EventBase]): The set of state + events. + Returns: + str, the name of the rule (either "direct", "restricted" or "unrestricted") + """ + access_rules = state_events.get((ACCESS_RULES_TYPE, "")) + if access_rules is None: + rule = ACCESS_RULE_RESTRICTED + else: + rule = access_rules.content.get("rule") + return rule + + @staticmethod + def _get_join_rule_from_state(state_events): + """Extract the room's join rule from the given set of state events. + + Args: + state_events (dict[tuple[event type, state key], EventBase]): The set of state + events. 
+ Returns: + str, the name of the join rule (either "public", or "invite") + """ + join_rule_event = state_events.get((EventTypes.JoinRules, "")) + if join_rule_event is None: + return None + return join_rule_event.content.get("join_rule") + + @staticmethod + def _get_members_and_tokens_from_state(state_events): + """Retrieves from a list of state events the list of users that have a + m.room.member event in the room, and the tokens of 3PID invites in the room. + + Args: + state_events (dict[tuple[event type, state key], EventBase]): The set of state + events. + Returns: + existing_members (list[str]): List of targets of the m.room.member events in + the state. + threepid_invite_tokens (list[str]): List of tokens of the 3PID invites in the + state. + """ + existing_members = [] + threepid_invite_tokens = [] + for key, state_event in state_events.items(): + if key[0] == EventTypes.Member and state_event.content: + existing_members.append(state_event.state_key) + if key[0] == EventTypes.ThirdPartyInvite and state_event.content: + # Don't include revoked invites. + threepid_invite_tokens.append(state_event.state_key) + + return existing_members, threepid_invite_tokens + + @staticmethod + def _is_invite_from_threepid(invite, threepid_invite_token): + """Checks whether the given invite follows the given 3PID invite. + + Args: + invite (EventBase): The m.room.member event with "invite" membership. + threepid_invite_token (str): The state key from the 3PID invite. + """ + token = ( + invite.content.get("third_party_invite", {}) + .get("signed", {}) + .get("token", "") + ) + + return token == threepid_invite_token diff --git a/synapse/types.py b/synapse/types.py
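As a quick sanity check of the module's entry points, `parse_config` accepts any config that names an identity server and rejects one that does not (the import path assumes the module location added by this diff):

```
from synapse.config._base import ConfigError
from synapse.third_party_rules.access_rules import RoomAccessRules

# A config naming an identity server is returned as-is...
assert RoomAccessRules.parse_config({"id_server": "vector.im"})

# ...while one without id_server is refused at startup.
try:
    RoomAccessRules.parse_config({"domains_forbidden_when_restricted": []})
except ConfigError:
    pass  # "No IS for event rules TchapEventRules"
```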
index aafc3ffe74..55b2cd1836 100644 --- a/synapse/types.py +++ b/synapse/types.py
@@ -17,6 +17,8 @@ import re
 import string
 from collections import namedtuple
 
+from six.moves import filter
+
 import attr
 from signedjson.key import decode_verify_key_bytes
 from unpaddedbase64 import decode_base64
@@ -238,6 +240,19 @@ def contains_invalid_mxid_characters(localpart):
     return any(c not in mxid_localpart_allowed_characters for c in localpart)
 
 
+def strip_invalid_mxid_characters(localpart):
+    """Removes any invalid characters from an mxid localpart
+
+    Args:
+        localpart (basestring): the localpart to be stripped
+
+    Returns:
+        localpart (basestring): the stripped localpart
+    """
+    filtered = filter(lambda c: c in mxid_localpart_allowed_characters, localpart)
+    return "".join(filtered)
+
+
 UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
 
 # the following is a pattern which matches '=', and bytes which are not allowed in a mxid
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
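A usage sketch for the new helper; the exact output depends on `mxid_localpart_allowed_characters` (lowercase letters, digits and `_-./=` at the time of writing), so treat the expected value as illustrative:

```
from synapse.types import strip_invalid_mxid_characters

# "@" is not a valid localpart character and is silently dropped:
assert strip_invalid_mxid_characters("john.doe@host") == "john.doehost"
```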
index 84f5ae22c3..2e8f6543e5 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py
@@ -271,7 +271,7 @@ class _CacheDescriptorBase(object): else: self.function_to_call = orig - arg_spec = inspect.getargspec(orig) + arg_spec = inspect.getfullargspec(orig) all_args = arg_spec.args if "cache_context" in all_args: diff --git a/synapse/util/caches/snapshot_cache.py b/synapse/util/caches/snapshot_cache.py deleted file mode 100644
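`inspect.getargspec` is deprecated on Python 3 and fails outright on functions with keyword-only arguments or annotations; `getfullargspec` is the drop-in replacement, and its `.args` attribute (the only thing this code reads) behaves identically:

```
import inspect


def cached_fn(self, room_id, on_invalidate=None):
    pass


arg_spec = inspect.getfullargspec(cached_fn)
assert arg_spec.args == ["self", "room_id", "on_invalidate"]
```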
index 8318db8d2c..0000000000 --- a/synapse/util/caches/snapshot_cache.py +++ /dev/null
@@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.util.async_helpers import ObservableDeferred - - -class SnapshotCache(object): - """Cache for snapshots like the response of /initialSync. - The response of initialSync only has to be a recent snapshot of the - server state. It shouldn't matter to clients if it is a few minutes out - of date. - - This caches a deferred response. Until the deferred completes it will be - returned from the cache. This means that if the client retries the request - while the response is still being computed, that original response will be - used rather than trying to compute a new response. - - Once the deferred completes it will removed from the cache after 5 minutes. - We delay removing it from the cache because a client retrying its request - could race with us finishing computing the response. - - Rather than tracking precisely how long something has been in the cache we - keep two generations of completed responses. Every 5 minutes discard the - old generation, move the new generation to the old generation, and set the - new generation to be empty. This means that a result will be in the cache - somewhere between 5 and 10 minutes. - """ - - DURATION_MS = 5 * 60 * 1000 # Cache results for 5 minutes. - - def __init__(self): - self.pending_result_cache = {} # Request that haven't finished yet. - self.prev_result_cache = {} # The older requests that have finished. - self.next_result_cache = {} # The newer requests that have finished. - self.time_last_rotated_ms = 0 - - def rotate(self, time_now_ms): - # Rotate once if the cache duration has passed since the last rotation. - if time_now_ms - self.time_last_rotated_ms >= self.DURATION_MS: - self.prev_result_cache = self.next_result_cache - self.next_result_cache = {} - self.time_last_rotated_ms += self.DURATION_MS - - # Rotate again if the cache duration has passed twice since the last - # rotation. - if time_now_ms - self.time_last_rotated_ms >= self.DURATION_MS: - self.prev_result_cache = self.next_result_cache - self.next_result_cache = {} - self.time_last_rotated_ms = time_now_ms - - def get(self, time_now_ms, key): - self.rotate(time_now_ms) - # This cache is intended to deduplicate requests, so we expect it to be - # missed most of the time. So we just lookup the key in all of the - # dictionaries rather than trying to short circuit the lookup if the - # key is found. - result = self.prev_result_cache.get(key) - result = self.next_result_cache.get(key, result) - result = self.pending_result_cache.get(key, result) - if result is not None: - return result.observe() - else: - return None - - def set(self, time_now_ms, key, deferred): - self.rotate(time_now_ms) - - result = ObservableDeferred(deferred) - - self.pending_result_cache[key] = result - - def shuffle_along(r): - # When the deferred completes we shuffle it along to the first - # generation of the result cache. 
So that it will eventually - # expire from the rotation of that cache. - self.next_result_cache[key] = result - self.pending_result_cache.pop(key, None) - return r - - result.addBoth(shuffle_along) - - return result.observe() diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py
index 982c6d81ca..6a2464cab3 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py
@@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,12 +15,15 @@ # limitations under the License. import random +import re import string import six from six import PY2, PY3 from six.moves import range +from synapse.api.errors import Codes, SynapseError + _string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@" # random_string and random_string_with_symbols are used for a range of things, @@ -27,6 +31,8 @@ _string_with_symbols = string.digits + string.ascii_letters + ".,;:^&*-_+=#~@" # we get cryptographically-secure randoms. rand = random.SystemRandom() +client_secret_regex = re.compile(r"^[0-9a-zA-Z.=_-]+$") + def random_string(length): return "".join(rand.choice(string.ascii_letters) for _ in range(length)) @@ -109,3 +115,11 @@ def exception_to_unicode(e): return msg.decode("utf-8", errors="replace") else: return msg + + +def assert_valid_client_secret(client_secret): + """Validate that a given string matches the client_secret regex defined by the spec""" + if client_secret_regex.match(client_secret) is None: + raise SynapseError( + 400, "Invalid client_secret parameter", errcode=Codes.INVALID_PARAM + ) diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
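The new helper either returns silently or raises; for example:

```
from synapse.api.errors import SynapseError
from synapse.util.stringutils import assert_valid_client_secret

assert_valid_client_secret("this_is-a.valid=secret0")  # no exception

try:
    assert_valid_client_secret("not a secret!")  # space and "!" fail the regex
except SynapseError as e:
    assert e.code == 400
```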
index 3ec1dfb0c2..34ce7cac16 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py
@@ -16,11 +16,14 @@ import logging
 
 import re
 
+from twisted.internet import defer
+
 logger = logging.getLogger(__name__)
 
 
+@defer.inlineCallbacks
 def check_3pid_allowed(hs, medium, address):
-    """Checks whether a given format of 3PID is allowed to be used on this HS
+    """Checks whether a given 3PID is allowed to be used on this HS
 
     Args:
         hs (synapse.server.HomeServer): server
@@ -28,9 +31,36 @@ def check_3pid_allowed(hs, medium, address):
         address (str): address within that medium (e.g. "wotan@matrix.org")
             msisdns need to first have been canonicalised
     Returns:
-        bool: whether the 3PID medium/address is allowed to be added to this HS
+        Deferred[bool]: whether the 3PID medium/address is allowed to be added to this HS
     """
+    if hs.config.check_is_for_allowed_local_3pids:
+        data = yield hs.get_simple_http_client().get_json(
+            "https://%s%s"
+            % (
+                hs.config.check_is_for_allowed_local_3pids,
+                "/_matrix/identity/api/v1/internal-info",
+            ),
+            {"medium": medium, "address": address},
+        )
+
+        # Check for invalid response
+        if "hs" not in data and "shadow_hs" not in data:
+            defer.returnValue(False)
+
+        # Check if this user is intended to register for this homeserver
+        if (
+            data.get("hs") != hs.config.server_name
+            and data.get("shadow_hs") != hs.config.server_name
+        ):
+            defer.returnValue(False)
+
+        if data.get("requires_invite", False) and not data.get("invited", False):
+            # Requires an invite but hasn't been invited
+            defer.returnValue(False)
+
+        defer.returnValue(True)
+
     if hs.config.allowed_local_3pids:
         for constraint in hs.config.allowed_local_3pids:
             logger.debug(
@@ -43,8 +73,8 @@ def check_3pid_allowed(hs, medium, address):
             if medium == constraint["medium"] and re.match(
                 constraint["pattern"], address
             ):
-                return True
+                defer.returnValue(True)
     else:
-        return True
+        defer.returnValue(True)
 
-    return False
+    defer.returnValue(False)
diff --git a/sytest-blacklist b/sytest-blacklist
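Because `check_3pid_allowed` is now decorated with `inlineCallbacks`, every caller has to yield it; a sketch of the adjusted calling convention (the error raised is illustrative, not prescribed by this diff):

```
from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.util.threepids import check_3pid_allowed


@defer.inlineCallbacks
def register_email(hs, address):
    # The helper may now make an HTTP round-trip to an identity server,
    # so the result is a Deferred rather than a plain bool.
    allowed = yield check_3pid_allowed(hs, "email", address)
    if not allowed:
        raise SynapseError(403, "Third party identifier is not allowed")
    defer.returnValue(True)
```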
index 79b2d4402a..fd50197b13 100644 --- a/sytest-blacklist +++ b/sytest-blacklist
@@ -36,3 +36,24 @@ Inbound federation of state requires event_id as a mandatory paramater # Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands Can upload self-signing keys + +# flaky test +If remote user leaves room we no longer receive device updates + +# flaky test +Can re-join room if re-invited + +# flaky test +Forgotten room messages cannot be paginated + +# flaky test +Local device key changes get to remote servers + +# flaky test +Old leaves are present in gapped incremental syncs + +# flaky test on workers +Old members are included in gappy incr LL sync if they start speaking + +# flaky test on workers +Presence changes to UNAVAILABLE are reported to remote room members diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index fdfa2cbbc4..854eb6c024 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py
@@ -183,10 +183,6 @@ class E2eKeysHandlerTestCase(unittest.TestCase): ) self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]}) - test_replace_master_key.skip = ( - "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486" - ) - @defer.inlineCallbacks def test_reupload_signatures(self): """re-uploading a signature should not fail""" @@ -507,7 +503,3 @@ class E2eKeysHandlerTestCase(unittest.TestCase): ], other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey], ) - - test_upload_signatures.skip = ( - "Disabled waiting on #https://github.com/matrix-org/synapse/pull/6486" - ) diff --git a/tests/handlers/test_identity.py b/tests/handlers/test_identity.py new file mode 100644
index 0000000000..34f6bfb422 --- /dev/null +++ b/tests/handlers/test_identity.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet import defer
+
+import synapse.rest.admin
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account
+
+from tests import unittest
+
+
+class ThreepidISRewrittenURLTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        account.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        self.address = "test@test"
+        self.is_server_name = "testis"
+        self.rewritten_is_url = "int.testis"
+
+        config = self.default_config()
+        config["trusted_third_party_id_servers"] = [self.is_server_name]
+        config["rewrite_identity_server_urls"] = {
+            self.is_server_name: self.rewritten_is_url
+        }
+
+        mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+        mock_http_client.get_json.side_effect = defer.succeed({})
+        mock_http_client.post_json_get_json.return_value = defer.succeed(
+            {"address": self.address, "medium": "email"}
+        )
+
+        self.hs = self.setup_test_homeserver(
+            config=config, simple_http_client=mock_http_client
+        )
+
+        mock_blacklisting_http_client = Mock(spec=["get_json", "post_json_get_json"])
+        mock_blacklisting_http_client.get_json.side_effect = defer.succeed({})
+        mock_blacklisting_http_client.post_json_get_json.return_value = defer.succeed(
+            {"address": self.address, "medium": "email"}
+        )
+
+        # TODO: This class does not use a singleton to get its http client
+        # This should be fixed for easier testing
+        # https://github.com/matrix-org/synapse-dinsic/issues/26
+        self.hs.get_handlers().identity_handler.blacklisting_http_client = (
+            mock_blacklisting_http_client
+        )
+
+        return self.hs
+
+    def prepare(self, reactor, clock, hs):
+        self.user_id = self.register_user("kermit", "monkey")
+
+    def test_rewritten_id_server(self):
+        """
+        Tests that, when validating a 3PID association while rewriting the IS's server
+        name:
+        * the bind request is done against the rewritten hostname
+        * the original, non-rewritten, server name is stored in the database
+        """
+        handler = self.hs.get_handlers().identity_handler
+        post_json_get_json = handler.blacklisting_http_client.post_json_get_json
+        store = self.hs.get_datastore()
+
+        creds = {"sid": "123", "client_secret": "some_secret"}
+
+        # Make sure processing the mocked response goes through.
+        data = self.get_success(
+            handler.bind_threepid(
+                client_secret=creds["client_secret"],
+                sid=creds["sid"],
+                mxid=self.user_id,
+                id_server=self.is_server_name,
+                use_v2=False,
+            )
+        )
+        self.assertEqual(data.get("address"), self.address)
+
+        # Check that the request was done against the rewritten server name.
+ post_json_get_json.assert_called_once_with( + "https://%s/_matrix/identity/api/v1/3pid/bind" % self.rewritten_is_url, + { + "sid": creds["sid"], + "client_secret": creds["client_secret"], + "mxid": self.user_id, + }, + headers={}, + ) + + # Check that the original server name is saved in the database instead of the + # rewritten one. + id_servers = self.get_success( + store.get_id_servers_user_bound(self.user_id, "email", self.address) + ) + self.assertEqual(id_servers, [self.is_server_name]) diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index d60c124eec..2311040201 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py
@@ -67,13 +67,11 @@ class ProfileTestCase(unittest.TestCase): self.bob = UserID.from_string("@4567:test") self.alice = UserID.from_string("@alice:remote") - yield self.store.create_profile(self.frank.localpart) - self.handler = hs.get_profile_handler() @defer.inlineCallbacks def test_get_my_name(self): - yield self.store.set_profile_displayname(self.frank.localpart, "Frank") + yield self.store.set_profile_displayname(self.frank.localpart, "Frank", 1) displayname = yield self.handler.get_displayname(self.frank) @@ -116,8 +114,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_incoming_fed_query(self): - yield self.store.create_profile("caroline") - yield self.store.set_profile_displayname("caroline", "Caroline") + yield self.store.set_profile_displayname("caroline", "Caroline", 1) response = yield self.query_handlers["profile"]( {"user_id": "@caroline:test", "field": "displayname"} @@ -128,7 +125,7 @@ class ProfileTestCase(unittest.TestCase): @defer.inlineCallbacks def test_get_my_avatar(self): yield self.store.set_profile_avatar_url( - self.frank.localpart, "http://my.server/me.png" + self.frank.localpart, "http://my.server/me.png", 1 ) avatar_url = yield self.handler.get_avatar_url(self.frank) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
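The new trailing argument threaded through these store calls appears to be a profile-replication batch number, a dinsic-specific extension; treat the signature below as an assumption rather than mainline Synapse API. A toy store showing the shape:

# Sketch of the assumed storage API after this change.
class FakeProfileStore:
    def __init__(self):
        self.profiles = {}

    def set_profile_displayname(self, localpart, displayname, batchnum):
        # The batch number would let a replication job push profile updates
        # to remote servers in order.
        self.profiles[localpart] = (displayname, batchnum)

store = FakeProfileStore()
store.set_profile_displayname("frank", "Frank", 1)
assert store.profiles["frank"] == ("Frank", 1)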
index 1e9ba3a201..ae43c6ea7e 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py
@@ -20,6 +20,7 @@ from twisted.internet import defer from synapse.api.constants import UserTypes from synapse.api.errors import Codes, ResourceLimitError, SynapseError from synapse.handlers.register import RegistrationHandler +from synapse.rest.client.v2_alpha.register import _map_email_to_displayname from synapse.types import RoomAlias, UserID, create_requester from .. import unittest @@ -256,6 +257,26 @@ class RegistrationTestCase(unittest.HomeserverTestCase): self.handler.register_user(localpart=invalid_user_id), SynapseError ) + def test_email_to_displayname_mapping(self): + """Test that custom emails are mapped to new user displaynames correctly""" + self._check_mapping( + "jack-phillips.rivers@big-org.com", "Jack-Phillips Rivers [Big-Org]" + ) + + self._check_mapping("bob.jones@matrix.org", "Bob Jones [Tchap Admin]") + + self._check_mapping("bob-jones.blabla@gouv.fr", "Bob-Jones Blabla [Gouv]") + + # Multibyte unicode characters + self._check_mapping( + "j\u030a\u0065an-poppy.seed@example.com", + "J\u030a\u0065an-Poppy Seed [Example]", + ) + + def _check_mapping(self, i, expected): + result = _map_email_to_displayname(i) + self.assertEqual(result, expected) + @defer.inlineCallbacks def get_or_create_user(self, requester, localpart, displayname, password_hash=None): """Creates a new user if the user does not exist, diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
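A minimal reimplementation consistent with the expectations exercised above; the real helper is _map_email_to_displayname in synapse/rest/client/v2_alpha/register.py, and its exact special-casing may differ, so read this as a sketch:

# Sketch matching the test expectations; hand-rolled capitalisation is used
# because str.title() mishandles hyphens and combining characters.
def map_email_to_displayname(address):
    localpart, domain = address.rsplit("@", 1)

    def cap(word):
        # Capitalise each hyphen-separated component: "bob-jones" -> "Bob-Jones"
        return "-".join(p[:1].upper() + p[1:] for p in word.split("-"))

    name = " ".join(cap(w) for w in localpart.split("."))

    # The tests suggest matrix.org users are badged as Tchap admins.
    if domain == "matrix.org":
        org = "Tchap Admin"
    else:
        org = cap(domain.split(".")[0])

    return "%s [%s]" % (name, org)

assert map_email_to_displayname("bob.jones@matrix.org") == "Bob Jones [Tchap Admin]"
assert map_email_to_displayname("bob-jones.blabla@gouv.fr") == "Bob-Jones Blabla [Gouv]"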
index d9d312f0fb..8e6b0b7536 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py
@@ -21,8 +21,12 @@ from tests import unittest # The expected number of state events in a fresh public room. EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM = 5 + # The expected number of state events in a fresh private room. -EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 6 +# +# Note: we increase this by 1 on the dinsic branch as we send +# a "im.vector.room.access_rules" state event into new private rooms +EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 7 class StatsRoomTests(unittest.HomeserverTestCase): diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index c973521907..e163a46f6b 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py
@@ -15,15 +15,22 @@ import json +from mock import Mock + +from twisted.internet import defer + import synapse.rest.admin from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import account from tests import unittest -class IdentityTestCase(unittest.HomeserverTestCase): +class IdentityDisabledTestCase(unittest.HomeserverTestCase): + """Tests that 3PID lookup attempts fail when the HS's config disallows them.""" servlets = [ + account.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, @@ -32,24 +39,113 @@ class IdentityTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + config["trusted_third_party_id_servers"] = ["testis"] config["enable_3pid_lookup"] = False self.hs = self.setup_test_homeserver(config=config) return self.hs + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + def test_3pid_invite_disabled(self): + request, channel = self.make_request( + b"POST", "/createRoom", b"{}", access_token=self.tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + room_id = channel.json_body["room_id"] + + params = { + "id_server": "testis", + "medium": "email", + "address": "test@example.com", + } + request_data = json.dumps(params) + request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") + request, channel = self.make_request( + b"POST", request_url, request_data, access_token=self.tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + def test_3pid_lookup_disabled(self): - self.hs.config.enable_3pid_lookup = False + url = ( + "/_matrix/client/unstable/account/3pid/lookup" + "?id_server=testis&medium=email&address=foo@bar.baz" + ) + request, channel = self.make_request("GET", url, access_token=self.tok) + self.render(request) + self.assertEqual(channel.result["code"], b"403", channel.result) - self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") + def test_3pid_bulk_lookup_disabled(self): + url = "/_matrix/client/unstable/account/3pid/bulk_lookup" + data = { + "id_server": "testis", + "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]], + } + request_data = json.dumps(data) + request, channel = self.make_request( + "POST", url, request_data, access_token=self.tok + ) + self.render(request) + self.assertEqual(channel.result["code"], b"403", channel.result) + + +class IdentityEnabledTestCase(unittest.HomeserverTestCase): + """Tests that 3PID lookup attempts succeed when the HS's config allows them.""" + servlets = [ + account.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + config = self.default_config() + config["enable_3pid_lookup"] = True + config["trusted_third_party_id_servers"] = ["testis"] + + mock_http_client = Mock(spec=["get_json", "post_json_get_json"]) + mock_http_client.get_json.return_value = defer.succeed((200, "{}")) + mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}")) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + # TODO: This class does not use a singleton to get its http client + # This should be fixed for easier testing + #
https://github.com/matrix-org/synapse-dinsic/issues/26 + self.hs.get_handlers().identity_handler.http_client = ( + mock_http_client + ) + + return self.hs + + def prepare(self, reactor, clock, hs): + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + def test_3pid_invite_enabled(self): request, channel = self.make_request( - b"POST", "/createRoom", b"{}", access_token=tok + b"POST", "/createRoom", b"{}", access_token=self.tok ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) room_id = channel.json_body["room_id"] + # Replace the blacklisting SimpleHttpClient with our mock + self.hs.get_room_member_handler().simple_http_client = Mock( + spec=["get_json", "post_json_get_json"] + ) + self.hs.get_room_member_handler().simple_http_client.get_json.return_value = ( + defer.succeed((200, "{}")) + ) + params = { "id_server": "testis", "medium": "email", @@ -58,7 +154,44 @@ class IdentityTestCase(unittest.HomeserverTestCase): request_data = json.dumps(params) request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") request, channel = self.make_request( - b"POST", request_url, request_data, access_token=tok + b"POST", request_url, request_data, access_token=self.tok ) self.render(request) - self.assertEquals(channel.result["code"], b"403", channel.result) + + get_json = self.hs.get_handlers().identity_handler.http_client.get_json + get_json.assert_called_once_with( + "https://testis/_matrix/identity/api/v1/lookup", + {"address": "test@example.com", "medium": "email"}, + ) + + def test_3pid_lookup_enabled(self): + url = ( + "/_matrix/client/unstable/account/3pid/lookup" + "?id_server=testis&medium=email&address=foo@bar.baz" + ) + request, channel = self.make_request("GET", url, access_token=self.tok) + self.render(request) + + get_json = self.hs.get_simple_http_client().get_json + get_json.assert_called_once_with( + "https://testis/_matrix/identity/api/v1/lookup", + {"address": "foo@bar.baz", "medium": "email"}, + ) + + def test_3pid_bulk_lookup_enabled(self): + url = "/_matrix/client/unstable/account/3pid/bulk_lookup" + data = { + "id_server": "testis", + "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]], + } + request_data = json.dumps(data) + request, channel = self.make_request( + "POST", url, request_data, access_token=self.tok + ) + self.render(request) + + post_json = self.hs.get_simple_http_client().post_json_get_json + post_json.assert_called_once_with( + "https://testis/_matrix/identity/api/v1/bulk_lookup", + {"threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]]}, + ) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
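These tests lean on a common pattern for stubbing Twisted HTTP clients: a Mock whose methods hand back already-fired Deferreds. A self-contained sketch (plain mock usage, not Synapse-specific):

from mock import Mock
from twisted.internet import defer

client = Mock(spec=["get_json", "post_json_get_json"])
# return_value hands every caller the same pre-fired Deferred.
client.get_json.return_value = defer.succeed((200, "{}"))
# side_effect must be a callable (or an exception/iterable), so wrap the
# Deferred construction in a lambda to get a fresh Deferred per call.
client.post_json_get_json.side_effect = lambda *a, **kw: defer.succeed((200, "{}"))

d = client.get_json("https://testis/_matrix/identity/api/v1/lookup")
assert d.result == (200, "{}")  # already fired, so .result is populated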
index 95475bb651..9e549d8a91 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py
@@ -34,6 +34,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + config["default_room_version"] = "1" config["retention"] = { "enabled": True, "default_policy": { @@ -203,6 +204,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + config["default_room_version"] = "1" config["retention"] = { "enabled": True, } diff --git a/tests/rest/client/test_room_access_rules.py b/tests/rest/client/test_room_access_rules.py new file mode 100644
index 0000000000..f10ae0adeb --- /dev/null +++ b/tests/rest/client/test_room_access_rules.py
@@ -0,0 +1,726 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import random +import string + +from mock import Mock + +from twisted.internet import defer +from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.third_party_rules.access_rules import ( + ACCESS_RULE_DIRECT, + ACCESS_RULE_RESTRICTED, + ACCESS_RULE_UNRESTRICTED, + ACCESS_RULES_TYPE, +) + +from tests import unittest + + +class RoomAccessTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + config["third_party_event_rules"] = { + "module": "synapse.third_party_rules.access_rules.RoomAccessRules", + "config": { + "domains_forbidden_when_restricted": ["forbidden_domain"], + "id_server": "testis", + }, + } + config["trusted_third_party_id_servers"] = ["testis"] + + def send_invite(destination, room_id, event_id, pdu): + return defer.succeed(pdu) + + def get_json(uri, args={}, headers=None): + address_domain = args["address"].split("@")[1] + return defer.succeed({"hs": address_domain}) + + def post_json_get_json(uri, post_json, args={}, headers=None): + token = "".join(random.choice(string.ascii_letters) for _ in range(10)) + return defer.succeed( + { + "token": token, + "public_keys": [ + { + "public_key": "serverpublickey", + "key_validity_url": "https://testis/pubkey/isvalid", + }, + { + "public_key": "phemeralpublickey", + "key_validity_url": "https://testis/pubkey/ephemeral/isvalid", + }, + ], + "display_name": "f...@b...", + } + ) + + mock_federation_client = Mock(spec=["send_invite"]) + mock_federation_client.send_invite.side_effect = send_invite + + mock_http_client = Mock( + spec=["get_json", "post_json_get_json"], + ) + # Mocking the response for /info on the IS API. + mock_http_client.get_json.side_effect = get_json + # Mocking the response for /store-invite on the IS API. 
+ mock_http_client.post_json_get_json.side_effect = post_json_get_json + self.hs = self.setup_test_homeserver( + config=config, + federation_client=mock_federation_client, + simple_http_client=mock_http_client, + ) + + # TODO: This class does not use a singleton to get its http client + # This should be fixed for easier testing + # https://github.com/matrix-org/synapse-dinsic/issues/26 + self.hs.get_handlers().identity_handler.blacklisting_http_client = mock_http_client + + return self.hs + + def prepare(self, reactor, clock, homeserver): + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.restricted_room = self.create_room() + self.unrestricted_room = self.create_room(rule=ACCESS_RULE_UNRESTRICTED) + self.direct_rooms = [ + self.create_room(direct=True), + self.create_room(direct=True), + self.create_room(direct=True), + ] + + self.invitee_id = self.register_user("invitee", "test") + self.invitee_tok = self.login("invitee", "test") + + self.helper.invite( + room=self.direct_rooms[0], + src=self.user_id, + targ=self.invitee_id, + tok=self.tok, + ) + + def test_create_room_no_rule(self): + """Tests that creating a room with no rule will set the default value.""" + room_id = self.create_room() + rule = self.current_rule_in_room(room_id) + + self.assertEqual(rule, ACCESS_RULE_RESTRICTED) + + def test_create_room_direct_no_rule(self): + """Tests that creating a direct room with no rule will set the default value.""" + room_id = self.create_room(direct=True) + rule = self.current_rule_in_room(room_id) + + self.assertEqual(rule, ACCESS_RULE_DIRECT) + + def test_create_room_valid_rule(self): + """Tests that creating a room with a valid rule will set the right value.""" + room_id = self.create_room(rule=ACCESS_RULE_UNRESTRICTED) + rule = self.current_rule_in_room(room_id) + + self.assertEqual(rule, ACCESS_RULE_UNRESTRICTED) + + def test_create_room_invalid_rule(self): + """Tests that creating a room with an invalid rule will fail.""" + self.create_room(rule=ACCESS_RULE_DIRECT, expected_code=400) + + def test_create_room_direct_invalid_rule(self): + """Tests that creating a direct room with an invalid rule will fail. + """ + self.create_room(direct=True, rule=ACCESS_RULE_RESTRICTED, expected_code=400) + + def test_public_room(self): + """Tests that it's not possible to have a room with the public join rule and an + access rule that's not restricted. + """ + # Creating a room with the public_chat preset should succeed and set the access + # rule to restricted. + preset_room_id = self.create_room(preset=RoomCreationPreset.PUBLIC_CHAT) + self.assertEqual( + self.current_rule_in_room(preset_room_id), ACCESS_RULE_RESTRICTED + ) + + # Creating a room with the public join rule in its initial state should succeed + # and set the access rule to restricted. + init_state_room_id = self.create_room( + initial_state=[ + { + "type": "m.room.join_rules", + "content": {"join_rule": JoinRules.PUBLIC}, + } + ] + ) + self.assertEqual( + self.current_rule_in_room(init_state_room_id), ACCESS_RULE_RESTRICTED + ) + + # Changing access rule to unrestricted should fail. + self.change_rule_in_room( + preset_room_id, ACCESS_RULE_UNRESTRICTED, expected_code=403 + ) + self.change_rule_in_room( + init_state_room_id, ACCESS_RULE_UNRESTRICTED, expected_code=403 + ) + + # Changing access rule to direct should fail.
+ self.change_rule_in_room(preset_room_id, ACCESS_RULE_DIRECT, expected_code=403) + self.change_rule_in_room( + init_state_room_id, ACCESS_RULE_DIRECT, expected_code=403 + ) + + # Changing join rule to public in an unrestricted room should fail. + self.change_join_rule_in_room( + self.unrestricted_room, JoinRules.PUBLIC, expected_code=403 + ) + # Changing join rule to public in a direct room should fail. + self.change_join_rule_in_room( + self.direct_rooms[0], JoinRules.PUBLIC, expected_code=403 + ) + + # Creating a new room with the public_chat preset and an access rule that isn't + # restricted should fail. + self.create_room( + preset=RoomCreationPreset.PUBLIC_CHAT, + rule=ACCESS_RULE_UNRESTRICTED, + expected_code=400, + ) + self.create_room( + preset=RoomCreationPreset.PUBLIC_CHAT, + rule=ACCESS_RULE_DIRECT, + expected_code=400, + ) + + # Creating a room with the public join rule in its initial state and an access + # rule that isn't restricted should fail. + self.create_room( + initial_state=[ + { + "type": "m.room.join_rules", + "content": {"join_rule": JoinRules.PUBLIC}, + } + ], + rule=ACCESS_RULE_UNRESTRICTED, + expected_code=400, + ) + self.create_room( + initial_state=[ + { + "type": "m.room.join_rules", + "content": {"join_rule": JoinRules.PUBLIC}, + } + ], + rule=ACCESS_RULE_DIRECT, + expected_code=400, + ) + + def test_restricted(self): + """Tests that in restricted mode we're unable to invite users from blacklisted + servers but can invite other users. + """ + # We can't invite a user from a forbidden HS. + self.helper.invite( + room=self.restricted_room, + src=self.user_id, + targ="@test:forbidden_domain", + tok=self.tok, + expect_code=403, + ) + + # We can invite a user whose HS isn't forbidden. + self.helper.invite( + room=self.restricted_room, + src=self.user_id, + targ="@test:allowed_domain", + tok=self.tok, + expect_code=200, + ) + + # We can't send a 3PID invite to an address that is mapped to a forbidden HS. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.restricted_room, + expected_code=403, + ) + + # We can send a 3PID invite to an address that is mapped to an HS that's not + # forbidden. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.restricted_room, + expected_code=200, + ) + + def test_direct(self): + """Tests that, in direct mode, users other than the initial two can't be invited, + but the following scenario works: + * invited user joins the room + * invited user leaves the room + * room creator re-invites invited user + Also tests that a user from an HS that's in the list of forbidden domains (to use + in restricted mode) can be invited. + """ + not_invited_user = "@not_invited:forbidden_domain" + + # We can't invite a new user to the room. + self.helper.invite( + room=self.direct_rooms[0], + src=self.user_id, + targ=not_invited_user, + tok=self.tok, + expect_code=403, + ) + + # The invited user can join the room. + self.helper.join( + room=self.direct_rooms[0], + user=self.invitee_id, + tok=self.invitee_tok, + expect_code=200, + ) + + # The invited user can leave the room. + self.helper.leave( + room=self.direct_rooms[0], + user=self.invitee_id, + tok=self.invitee_tok, + expect_code=200, + ) + + # The invited user can be re-invited to the room. + self.helper.invite( + room=self.direct_rooms[0], + src=self.user_id, + targ=self.invitee_id, + tok=self.tok, + expect_code=200, + ) + + # If we're alone in the room and have always been the only member, we can invite + # someone.
+ self.helper.invite( + room=self.direct_rooms[1], + src=self.user_id, + targ=not_invited_user, + tok=self.tok, + expect_code=200, + ) + + # Loosen the 3PID invite ratelimiter so it doesn't interfere with this test + burst = self.hs.config.rc_third_party_invite.burst_count + per_second = self.hs.config.rc_third_party_invite.per_second + self.hs.config.rc_third_party_invite.burst_count = 10 + self.hs.config.rc_third_party_invite.per_second = 0.1 + + # We can't send a 3PID invite to a room that already has two members. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.direct_rooms[0], + expected_code=403, + ) + + # We can't send a 3PID invite to a room that already has a pending invite. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.direct_rooms[1], + expected_code=403, + ) + + # We can send a 3PID invite to a room in which we've always been the only member. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.direct_rooms[2], + expected_code=200, + ) + + # We can't send a second 3PID invite to a room in which there's already a + # pending 3PID invite. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.direct_rooms[2], + expected_code=403, + ) + + self.hs.config.rc_third_party_invite.burst_count = burst + self.hs.config.rc_third_party_invite.per_second = per_second + + def test_unrestricted(self): + """Tests that, in unrestricted mode, we can invite whoever we want, but we can + only change the power level of users that wouldn't be forbidden in restricted + mode. + """ + # We can invite users from any server, forbidden or not. + self.helper.invite( + room=self.unrestricted_room, + src=self.user_id, + targ="@test:forbidden_domain", + tok=self.tok, + expect_code=200, + ) + + self.helper.invite( + room=self.unrestricted_room, + src=self.user_id, + targ="@test:not_forbidden_domain", + tok=self.tok, + expect_code=200, + ) + + # We can send a 3PID invite to an address that is mapped to a forbidden HS. + self.send_threepid_invite( + address="test@forbidden_domain", + room_id=self.unrestricted_room, + expected_code=200, + ) + + # We can send a 3PID invite to an address that is mapped to an HS that's not + # forbidden. + self.send_threepid_invite( + address="test@allowed_domain", + room_id=self.unrestricted_room, + expected_code=200, + ) + + # We can send a power level event that doesn't redefine the default PL or set a + # non-default PL for a user that would be forbidden in restricted mode. + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.PowerLevels, + body={"users": {self.user_id: 100, "@test:not_forbidden_domain": 10}}, + tok=self.tok, + expect_code=200, + ) + + # We can't send a power level event that redefines the default PL and doesn't set + # a non-default PL for a user that would be forbidden in restricted mode. + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.PowerLevels, + body={ + "users": {self.user_id: 100, "@test:not_forbidden_domain": 10}, + "users_default": 10, + }, + tok=self.tok, + expect_code=403, + ) + + # We can't send a power level event that doesn't redefine the default PL but sets + # a non-default PL for a user that would be forbidden in restricted mode. + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.PowerLevels, + body={"users": {self.user_id: 100, "@test:forbidden_domain": 10}}, + tok=self.tok, + expect_code=403, + ) + + def test_change_rules(self): + """Tests that we can only change the current rule from restricted to + unrestricted.
+ """ + # We can change the rule from restricted to unrestricted. + self.change_rule_in_room( + room_id=self.restricted_room, + new_rule=ACCESS_RULE_UNRESTRICTED, + expected_code=200, + ) + + # We can't change the rule from restricted to direct. + self.change_rule_in_room( + room_id=self.restricted_room, new_rule=ACCESS_RULE_DIRECT, expected_code=403 + ) + + # We can't change the rule from unrestricted to restricted. + self.change_rule_in_room( + room_id=self.unrestricted_room, + new_rule=ACCESS_RULE_RESTRICTED, + expected_code=403, + ) + + # We can't change the rule from unrestricted to direct. + self.change_rule_in_room( + room_id=self.unrestricted_room, + new_rule=ACCESS_RULE_DIRECT, + expected_code=403, + ) + + # We can't change the rule from direct to restricted. + self.change_rule_in_room( + room_id=self.direct_rooms[0], + new_rule=ACCESS_RULE_RESTRICTED, + expected_code=403, + ) + + # We can't change the rule from direct to unrestricted. + self.change_rule_in_room( + room_id=self.direct_rooms[0], + new_rule=ACCESS_RULE_UNRESTRICTED, + expected_code=403, + ) + + def test_change_room_avatar(self): + """Tests that changing the room avatar is always allowed unless the room is a + direct chat, in which case it's forbidden. + """ + + avatar_content = { + "info": {"h": 398, "mimetype": "image/jpeg", "size": 31037, "w": 394}, + "url": "mxc://example.org/JWEIFJgwEIhweiWJE", + } + + self.helper.send_state( + room_id=self.restricted_room, + event_type=EventTypes.RoomAvatar, + body=avatar_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.RoomAvatar, + body=avatar_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.direct_rooms[0], + event_type=EventTypes.RoomAvatar, + body=avatar_content, + tok=self.tok, + expect_code=403, + ) + + def test_change_room_name(self): + """Tests that changing the room name is always allowed unless the room is a direct + chat, in which case it's forbidden. + """ + + name_content = {"name": "My super room"} + + self.helper.send_state( + room_id=self.restricted_room, + event_type=EventTypes.Name, + body=name_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.Name, + body=name_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.direct_rooms[0], + event_type=EventTypes.Name, + body=name_content, + tok=self.tok, + expect_code=403, + ) + + def test_change_room_topic(self): + """Tests that changing the room topic is always allowed unless the room is a + direct chat, in which case it's forbidden. + """ + + topic_content = {"topic": "Welcome to this room"} + + self.helper.send_state( + room_id=self.restricted_room, + event_type=EventTypes.Topic, + body=topic_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.unrestricted_room, + event_type=EventTypes.Topic, + body=topic_content, + tok=self.tok, + expect_code=200, + ) + + self.helper.send_state( + room_id=self.direct_rooms[0], + event_type=EventTypes.Topic, + body=topic_content, + tok=self.tok, + expect_code=403, + ) + + def test_revoke_3pid_invite_direct(self): + """Tests that revoking a 3PID invite doesn't cause the room access rules module to + confuse the revokation as a new 3PID invite. 
+ """ + invite_token = "sometoken" + + invite_body = { + "display_name": "ker...@exa...", + "public_keys": [ + { + "key_validity_url": "https://validity_url", + "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA", + }, + { + "key_validity_url": "https://validity_url", + "public_key": "4_9nzEeDwR5N9s51jPodBiLnqH43A2_g2InVT137t9I", + }, + ], + "key_validity_url": "https://validity_url", + "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA", + } + + self.send_state_with_state_key( + room_id=self.direct_rooms[1], + event_type=EventTypes.ThirdPartyInvite, + state_key=invite_token, + body=invite_body, + tok=self.tok, + ) + + self.send_state_with_state_key( + room_id=self.direct_rooms[1], + event_type=EventTypes.ThirdPartyInvite, + state_key=invite_token, + body={}, + tok=self.tok, + ) + + invite_token = "someothertoken" + + self.send_state_with_state_key( + room_id=self.direct_rooms[1], + event_type=EventTypes.ThirdPartyInvite, + state_key=invite_token, + body=invite_body, + tok=self.tok, + ) + + def create_room( + self, + direct=False, + rule=None, + preset=RoomCreationPreset.TRUSTED_PRIVATE_CHAT, + initial_state=None, + expected_code=200, + ): + content = {"is_direct": direct, "preset": preset} + + if rule: + content["initial_state"] = [ + {"type": ACCESS_RULES_TYPE, "state_key": "", "content": {"rule": rule}} + ] + + if initial_state: + if "initial_state" not in content: + content["initial_state"] = [] + + content["initial_state"] += initial_state + + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/createRoom", + json.dumps(content), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + if expected_code == 200: + return channel.json_body["room_id"] + + def current_rule_in_room(self, room_id): + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, 200, channel.result) + return channel.json_body["rule"] + + def change_rule_in_room(self, room_id, new_rule, expected_code=200): + data = {"rule": new_rule} + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE), + json.dumps(data), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + def change_join_rule_in_room(self, room_id, new_join_rule, expected_code=200): + data = {"join_rule": new_join_rule} + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, EventTypes.JoinRules), + json.dumps(data), + access_token=self.tok, + ) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + def send_threepid_invite(self, address, room_id, expected_code=200): + params = {"id_server": "testis", "medium": "email", "address": address} + + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/%s/invite" % room_id, + json.dumps(params), + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def send_state_with_state_key( + self, room_id, event_type, state_key, body, tok, expect_code=200 + ): + path = "/_matrix/client/r0/rooms/%s/state/%s/%s" % ( + room_id, + event_type, + state_key, + ) + + request, channel = self.make_request( + "PUT", path, json.dumps(body), access_token=tok + ) + self.render(request) + + 
self.assertEqual(channel.code, expect_code, channel.result) + + return channel.json_body diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
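For orientation, the state event these helpers read and write has this shape, reconstructed from the constants and helpers above (the exact rule string values are assumed from the constant names):

# im.vector.room.access_rules is ACCESS_RULES_TYPE; the rules the constants
# name are taken to be "restricted", "unrestricted" and "direct".
access_rules_event = {
    "type": "im.vector.room.access_rules",
    "state_key": "",
    "content": {"rule": "restricted"},
}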
index 12c5e95cb5..6088bce154 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py
@@ -237,6 +237,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): config = self.default_config() config["require_auth_for_profile_requests"] = True + config["limit_profile_requests_to_known_users"] = True self.hs = self.setup_test_homeserver(config=config) return self.hs diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py new file mode 100644
index 0000000000..37f970c6b0 --- /dev/null +++ b/tests/rest/client/v2_alpha/test_password_policy.py
@@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from synapse.api.constants import LoginType +from synapse.api.errors import Codes +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import account, password_policy, register + +from tests import unittest + + +class PasswordPolicyTestCase(unittest.HomeserverTestCase): + """Tests the password policy feature and its compliance with MSC2000. + + When validating a password, Synapse does the necessary checks in this order: + + 1. Password is long enough + 2. Password contains digit(s) + 3. Password contains symbol(s) + 4. Password contains uppercase letter(s) + 5. Password contains lowercase letter(s) + + Therefore, each test in this test case that tests whether a password triggers the + right error code to be returned provides a password good enough to pass the previous + steps but not the one it's testing (nor any step that comes after). + """ + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + password_policy.register_servlets, + account.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.register_url = "/_matrix/client/r0/register" + self.policy = { + "enabled": True, + "minimum_length": 10, + "require_digit": True, + "require_symbol": True, + "require_lowercase": True, + "require_uppercase": True, + } + + config = self.default_config() + config["password_config"] = {"policy": self.policy} + + hs = self.setup_test_homeserver(config=config) + return hs + + def test_get_policy(self): + """Tests if the /password_policy endpoint returns the configured policy.""" + + request, channel = self.make_request( + "GET", "/_matrix/client/r0/password_policy" + ) + self.render(request) + + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual( + channel.json_body, + { + "m.minimum_length": 10, + "m.require_digit": True, + "m.require_symbol": True, + "m.require_lowercase": True, + "m.require_uppercase": True, + }, + channel.result, + ) + + def test_password_too_short(self): + request_data = json.dumps({"username": "kermit", "password": "shorty"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_TOO_SHORT, channel.result + ) + + def test_password_no_digit(self): + request_data = json.dumps({"username": "kermit", "password": "longerpassword"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT, channel.result + ) + + def test_password_no_symbol(self): + request_data = json.dumps({"username": "kermit", "password": 
"l0ngerpassword"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_SYMBOL, channel.result + ) + + def test_password_no_uppercase(self): + request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_UPPERCASE, channel.result + ) + + def test_password_no_lowercase(self): + request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.PASSWORD_NO_LOWERCASE, channel.result + ) + + def test_password_compliant(self): + request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"}) + request, channel = self.make_request("POST", self.register_url, request_data) + self.render(request) + + # Getting a 401 here means the password has passed validation and the server has + # responded with a list of registration flows. + self.assertEqual(channel.code, 401, channel.result) + + def test_password_change(self): + """This doesn't test every possible use case, only that hitting /account/password + triggers the password validation code. + """ + compliant_password = "C0mpl!antpassword" + not_compliant_password = "notcompliantpassword" + + user_id = self.register_user("kermit", compliant_password) + tok = self.login("kermit", compliant_password) + + request_data = json.dumps( + { + "new_password": not_compliant_password, + "auth": { + "password": compliant_password, + "type": LoginType.PASSWORD, + "user": user_id, + }, + } + ) + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/account/password", + request_data, + access_token=tok, + ) + self.render(request) + + self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index c0d0d2b44e..d99b100d0f 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py
@@ -19,8 +19,12 @@ import datetime import json import os +from mock import Mock + import pkg_resources +from twisted.internet import defer + import synapse.rest.admin from synapse.api.constants import LoginType from synapse.api.errors import Codes @@ -261,6 +265,47 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ) +class RegisterHideProfileTestCase(unittest.HomeserverTestCase): + + servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] + + def make_homeserver(self, reactor, clock): + + self.url = b"/_matrix/client/r0/register" + + config = self.default_config() + config["enable_registration"] = True + config["show_users_in_user_directory"] = False + config["replicate_user_profiles_to"] = ["fakeserver"] + + mock_http_client = Mock(spec=["get_json", "post_json_get_json"]) + mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}")) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + return self.hs + + def test_profile_hidden(self): + user_id = self.register_user("kermit", "monkey") + + post_json = self.hs.get_simple_http_client().post_json_get_json + + # We expect post_json_get_json to have been called twice: once with the original + # profile and once with the None profile resulting from the request to hide it + # from the user directory. + self.assertEqual(post_json.call_count, 2, post_json.call_args_list) + + # Get the args (and not kwargs) passed to post_json. + args = post_json.call_args[0] + # Make sure the last call was attempting to replicate profiles. + split_uri = args[0].split("/") + self.assertEqual(split_uri[len(split_uri) - 1], "replicate_profiles", args[0]) + # Make sure the last profile update was overriding the user's profile to None. 
+ self.assertEqual(args[1]["batch"][user_id], None, args[1]) + + class AccountValidityTestCase(unittest.HomeserverTestCase): servlets = [ @@ -269,6 +314,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): login.register_servlets, sync.register_servlets, account_validity.register_servlets, + account.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -361,6 +407,138 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): ) +class AccountValidityUserDirectoryTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.client.v1.profile.register_servlets, + synapse.rest.client.v1.room.register_servlets, + synapse.rest.client.v2_alpha.user_directory.register_servlets, + login.register_servlets, + register.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + account_validity.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + # Set accounts to expire after a week + config["enable_registration"] = True + config["account_validity"] = { + "enabled": True, + "period": 604800000, # Time in ms for 1 week + } + config["replicate_user_profiles_to"] = "test.is" + + # Mock homeserver requests to an identity server + mock_http_client = Mock(spec=["post_json_get_json"]) + mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}")) + + self.hs = self.setup_test_homeserver( + config=config, simple_http_client=mock_http_client + ) + + return self.hs + + def test_expired_user_in_directory(self): + """Test that an expired user is hidden in the user directory""" + # Create an admin user to search the user directory + admin_id = self.register_user("admin", "adminpassword", admin=True) + admin_tok = self.login("admin", "adminpassword") + + # Ensure the admin never expires + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": admin_id, + "expiration_ts": 999999999999, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Create a user to expire + username = "kermit" + user_id = self.register_user(username, "monkey") + self.login(username, "monkey") + + self.pump(1000) + self.reactor.advance(1000) + self.pump() + + # Expire the user + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": user_id, + "expiration_ts": 0, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Wait for the background job to run which hides expired users in the directory + self.pump(60 * 60 * 1000) + + # Mock the homeserver's HTTP client + post_json = self.hs.get_simple_http_client().post_json_get_json + + # Check if the homeserver has replicated the user's profile to the identity server + self.assertNotEquals(post_json.call_args, None, post_json.call_args) + payload = post_json.call_args[0][1] + batch = payload.get("batch") + self.assertNotEquals(batch, None, batch) + self.assertEquals(len(batch), 1, batch) + replicated_user_id = list(batch.keys())[0] + self.assertEquals(replicated_user_id, user_id, replicated_user_id) + + # There was replicated information about our user + # Check that it's 
None, signifying that the user should be removed from the user + # directory because they were expired + replicated_content = batch[user_id] + self.assertIsNone(replicated_content) + + # Now renew the user, and check they get replicated again to the identity server + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": user_id, + "expiration_ts": 99999999999, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.pump(10) + self.reactor.advance(10) + self.pump() + + # Check if the homeserver has replicated the user's profile to the identity server + post_json = self.hs.get_simple_http_client().post_json_get_json + self.assertNotEquals(post_json.call_args, None, post_json.call_args) + payload = post_json.call_args[0][1] + batch = payload.get("batch") + self.assertNotEquals(batch, None, batch) + self.assertEquals(len(batch), 1, batch) + replicated_user_id = list(batch.keys())[0] + self.assertEquals(replicated_user_id, user_id, replicated_user_id) + + # There was replicated information about our user + # Check that it's not None, signifying that the user is back in the user + # directory + replicated_content = batch[user_id] + self.assertIsNotNone(replicated_content) + + class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): servlets = [ @@ -391,9 +569,8 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # Email config. self.email_attempts = [] - def sendmail(*args, **kwargs): + async def sendmail(*args, **kwargs): self.email_attempts.append((args, kwargs)) - return config["email"] = { "enable_notifs": True, @@ -512,7 +689,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): "POST", "account/deactivate", request_data, access_token=tok ) self.render(request) - self.assertEqual(request.code, 200) + self.assertEqual(request.code, 200, channel.result) self.reactor.advance(datetime.timedelta(days=8).total_seconds()) diff --git a/tests/rulecheck/__init__.py b/tests/rulecheck/__init__.py new file mode 100644
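The replication payload these assertions pick apart looks like this (shape reconstructed from the tests themselves, not from a documented API):

# POSTed to an endpoint whose path ends in "replicate_profiles"; the batch
# maps user IDs to a profile dict, or to None to hide the user from the
# remote user directory.
replication_request = {
    "batch": {
        "@kermit:test": None,  # None => remove from the user directory
    }
}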
index 0000000000..a354d38ca8 --- /dev/null +++ b/tests/rulecheck/__init__.py
@@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/rulecheck/test_domainrulecheck.py b/tests/rulecheck/test_domainrulecheck.py new file mode 100644
index 0000000000..1accc70dc9 --- /dev/null +++ b/tests/rulecheck/test_domainrulecheck.py
@@ -0,0 +1,334 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json + +import synapse.rest.admin +from synapse.config._base import ConfigError +from synapse.rest.client.v1 import login, room +from synapse.rulecheck.domain_rule_checker import DomainRuleChecker + +from tests import unittest +from tests.server import make_request, render + + +class DomainRuleCheckerTestCase(unittest.TestCase): + def test_allowed(self): + config = { + "default": False, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + "domains_prevented_from_being_invited_to_published_rooms": ["target_two"], + } + check = DomainRuleChecker(config) + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_one", None, "room", False + ) + ) + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_two", None, "room", False + ) + ) + self.assertTrue( + check.user_may_invite( + "test:source_two", "test:target_two", None, "room", False + ) + ) + + # User can invite internal user to a published room + self.assertTrue( + check.user_may_invite( + "test:source_one", "test1:target_one", None, "room", False, True + ) + ) + + # User can invite external user to a non-published room + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_two", None, "room", False, False + ) + ) + + def test_disallowed(self): + config = { + "default": True, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + "source_four": [], + }, + } + check = DomainRuleChecker(config) + self.assertFalse( + check.user_may_invite( + "test:source_one", "test:target_three", None, "room", False + ) + ) + self.assertFalse( + check.user_may_invite( + "test:source_two", "test:target_three", None, "room", False + ) + ) + self.assertFalse( + check.user_may_invite( + "test:source_two", "test:target_one", None, "room", False + ) + ) + self.assertFalse( + check.user_may_invite( + "test:source_four", "test:target_one", None, "room", False + ) + ) + + # Inviting to a published room is still allowed here, since this config sets + # no published-room blacklist + self.assertTrue( + check.user_may_invite( + "test:source_one", "test:target_two", None, "room", False, True + ) + ) + + def test_default_allow(self): + config = { + "default": True, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + } + check = DomainRuleChecker(config) + self.assertTrue( + check.user_may_invite( + "test:source_three", "test:target_one", None, "room", False + ) + ) + + def test_default_deny(self): + config = { + "default": False, + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + } + check = DomainRuleChecker(config) + self.assertFalse( + check.user_may_invite( + "test:source_three", "test:target_one", None, "room", False + ) + ) + + def test_config_parse(self): + config = { + "default": False, + "domain_mapping": {
"source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + }, + } + self.assertEquals(config, DomainRuleChecker.parse_config(config)) + + def test_config_parse_failure(self): + config = { + "domain_mapping": { + "source_one": ["target_one", "target_two"], + "source_two": ["target_two"], + } + } + self.assertRaises(ConfigError, DomainRuleChecker.parse_config, config) + + +class DomainRuleCheckerRoomTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] + + hijack_auth = False + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["trusted_third_party_id_servers"] = ["localhost"] + + config["spam_checker"] = { + "module": "synapse.rulecheck.domain_rule_checker.DomainRuleChecker", + "config": { + "default": True, + "domain_mapping": {}, + "can_only_join_rooms_with_invite": True, + "can_only_create_one_to_one_rooms": True, + "can_only_invite_during_room_creation": True, + "can_invite_by_third_party_id": False, + }, + } + + hs = self.setup_test_homeserver(config=config) + return hs + + def prepare(self, reactor, clock, hs): + self.admin_user_id = self.register_user("admin_user", "pass", admin=True) + self.admin_access_token = self.login("admin_user", "pass") + + self.normal_user_id = self.register_user("normal_user", "pass", admin=False) + self.normal_access_token = self.login("normal_user", "pass") + + self.other_user_id = self.register_user("other_user", "pass", admin=False) + + def test_admin_can_create_room(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + def test_normal_user_cannot_create_empty_room(self): + channel = self._create_room(self.normal_access_token) + assert channel.result["code"] == b"403", channel.result + + def test_normal_user_cannot_create_room_with_multiple_invites(self): + channel = self._create_room( + self.normal_access_token, + content={"invite": [self.other_user_id, self.admin_user_id]}, + ) + assert channel.result["code"] == b"403", channel.result + + # Test that it correctly counts both normal and third party invites + channel = self._create_room( + self.normal_access_token, + content={ + "invite": [self.other_user_id], + "invite_3pid": [{"medium": "email", "address": "foo@example.com"}], + }, + ) + assert channel.result["code"] == b"403", channel.result + + # Test that it correctly rejects third party invites + channel = self._create_room( + self.normal_access_token, + content={ + "invite": [], + "invite_3pid": [{"medium": "email", "address": "foo@example.com"}], + }, + ) + assert channel.result["code"] == b"403", channel.result + + def test_normal_user_can_room_with_single_invites(self): + channel = self._create_room( + self.normal_access_token, content={"invite": [self.other_user_id]} + ) + assert channel.result["code"] == b"200", channel.result + + def test_cannot_join_public_room(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=403 + ) + + def test_can_join_invited_room(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.invite( + room_id, + src=self.admin_user_id, + 
targ=self.normal_user_id, + tok=self.admin_access_token, + ) + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200 + ) + + def test_cannot_invite(self): + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.invite( + room_id, + src=self.admin_user_id, + targ=self.normal_user_id, + tok=self.admin_access_token, + ) + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200 + ) + + self.helper.invite( + room_id, + src=self.normal_user_id, + targ=self.other_user_id, + tok=self.normal_access_token, + expect_code=403, + ) + + def test_cannot_3pid_invite(self): + """Test that unbound 3pid invites get rejected. + """ + channel = self._create_room(self.admin_access_token) + assert channel.result["code"] == b"200", channel.result + + room_id = channel.json_body["room_id"] + + self.helper.invite( + room_id, + src=self.admin_user_id, + targ=self.normal_user_id, + tok=self.admin_access_token, + ) + + self.helper.join( + room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200 + ) + + self.helper.invite( + room_id, + src=self.normal_user_id, + targ=self.other_user_id, + tok=self.normal_access_token, + expect_code=403, + ) + + request, channel = self.make_request( + "POST", + "rooms/%s/invite" % (room_id), + {"address": "foo@bar.com", "medium": "email", "id_server": "localhost"}, + access_token=self.normal_access_token, + ) + self.render(request) + self.assertEqual(channel.code, 403, channel.result["body"]) + + def _create_room(self, token, content={}): + path = "/_matrix/client/r0/createRoom?access_token=%s" % (token,) + + request, channel = make_request( + self.hs.get_reactor(), + "POST", + path, + content=json.dumps(content).encode("utf8"), + ) + render(request, self.resource, self.hs.get_reactor()) + + return channel diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
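As the parse-failure test shows, "default" is mandatory; a config accepted by DomainRuleChecker.parse_config looks like:

# domain_mapping maps an inviter's domain to the invitee domains it may
# invite; "default" decides unmapped cases. The room test case above also
# passes optional keys such as can_only_join_rooms_with_invite.
checker_config = {
    "default": False,
    "domain_mapping": {
        "source_one": ["target_one", "target_two"],
        "source_two": ["target_two"],
    },
}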
index fc279340d4..bf674dd184 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py
@@ -37,9 +37,13 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.reactor.advance(12345678)
 
         user_id = "@user:id"
+        device_id = "MY_DEVICE"
+
+        # Insert a user IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
 
@@ -47,14 +51,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.reactor.advance(10)
 
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 12345678000,
@@ -209,14 +213,16 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
             self.store.db.updates.do_next_background_update(100), by=0.1
         )
 
-        # Insert a user IP
         user_id = "@user:id"
+        device_id = "MY_DEVICE"
+
+        # Insert a user IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
-
         # Force persisting to disk
         self.reactor.advance(200)
 
@@ -224,7 +230,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
         self.get_success(
             self.store.db.simple_update(
                 table="devices",
-                keyvalues={"user_id": user_id, "device_id": "device_id"},
+                keyvalues={"user_id": user_id, "device_id": device_id},
                 updatevalues={"last_seen": None, "ip": None, "user_agent": None},
                 desc="test_devices_last_seen_bg_update",
             )
@@ -232,14 +238,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # We should now get nulls when querying
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": None,
                 "user_agent": None,
                 "last_seen": None,
@@ -272,14 +278,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # We should now get the correct result again
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
        )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 0,
@@ -296,11 +302,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
             self.store.db.updates.do_next_background_update(100), by=0.1
         )
 
-        # Insert a user IP
         user_id = "@user:id"
+        device_id = "MY_DEVICE"
+
+        # Insert a user IP
+        self.get_success(self.store.store_device(user_id, device_id, "display name",))
         self.get_success(
             self.store.insert_client_ip(
-                user_id, "access_token", "ip", "user_agent", "device_id"
+                user_id, "access_token", "ip", "user_agent", device_id
             )
         )
 
@@ -324,7 +333,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
                     "access_token": "access_token",
                     "ip": "ip",
                     "user_agent": "user_agent",
-                    "device_id": "device_id",
+                    "device_id": device_id,
                     "last_seen": 0,
                 }
             ],
@@ -347,14 +356,14 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase):
 
         # But we should still get the correct values for the device
         result = self.get_success(
-            self.store.get_last_client_ip_by_device(user_id, "device_id")
+            self.store.get_last_client_ip_by_device(user_id, device_id)
         )
 
-        r = result[(user_id, "device_id")]
+        r = result[(user_id, device_id)]
         self.assertDictContainsSubset(
             {
                 "user_id": user_id,
-                "device_id": "device_id",
+                "device_id": device_id,
                 "ip": "ip",
                 "user_agent": "user_agent",
                 "last_seen": 0,
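The pattern in all of these hunks is the same: the tests now register the device via store_device before recording an IP against it, presumably because the storage layer now expects the device row to exist. The assertions throughout rely on assertDictContainsSubset, which only checks that the expected key/value pairs appear in the returned row. A runnable illustration of that assertion style (the row dict here is invented for the example):

    import unittest

    class SubsetAssertionDemo(unittest.TestCase):
        def test_subset(self):
            # Invented row, shaped like what get_last_client_ip_by_device
            # returns in the tests above.
            row = {
                "user_id": "@user:id",
                "device_id": "MY_DEVICE",
                "ip": "ip",
                "user_agent": "user_agent",
                "last_seen": 12345678000,
            }
            # Passes as long as every expected pair is present; extra keys
            # in the actual dict are ignored.
            self.assertDictContainsSubset({"device_id": "MY_DEVICE", "ip": "ip"}, row)

    if __name__ == "__main__":
        unittest.main()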
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index e07ff01201..95f309fbbc 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 import signedjson.key
+import unpaddedbase64
 
 from twisted.internet.defer import Deferred
 
@@ -21,11 +22,17 @@ from synapse.storage.keys import FetchKeyResult
 
 import tests.unittest
 
-KEY_1 = signedjson.key.decode_verify_key_base64(
-    "ed25519", "key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
+
+def decode_verify_key_base64(key_id: str, key_base64: str):
+    key_bytes = unpaddedbase64.decode_base64(key_base64)
+    return signedjson.key.decode_verify_key_bytes(key_id, key_bytes)
+
+
+KEY_1 = decode_verify_key_base64(
+    "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
 )
-KEY_2 = signedjson.key.decode_verify_key_base64(
-    "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
+KEY_2 = decode_verify_key_base64(
+    "ed25519:key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
 )
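The replacement helper is small enough to run standalone: unpaddedbase64.decode_base64 converts the unpadded-base64 text to raw bytes, and signedjson.key.decode_verify_key_bytes builds the verify key from those bytes plus the combined "algorithm:version" key ID. A sketch (the final print line is illustrative only):

    import signedjson.key
    import unpaddedbase64

    def decode_verify_key_base64(key_id: str, key_base64: str):
        # key_id carries both algorithm and version, e.g. "ed25519:key1"
        key_bytes = unpaddedbase64.decode_base64(key_base64)
        return signedjson.key.decode_verify_key_bytes(key_id, key_bytes)

    key = decode_verify_key_base64(
        "ed25519:key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw"
    )
    print(key.alg, key.version)  # expected: ed25519 key1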
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index 9b6f7211ae..7458a37e54 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -33,9 +33,7 @@ class ProfileStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_displayname(self):
-        yield self.store.create_profile(self.u_frank.localpart)
-
-        yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank")
+        yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank", 1)
 
         self.assertEquals(
             "Frank", (yield self.store.get_profile_displayname(self.u_frank.localpart))
@@ -43,10 +41,8 @@ class ProfileStoreTestCase(unittest.TestCase):
 
     @defer.inlineCallbacks
     def test_avatar_url(self):
-        yield self.store.create_profile(self.u_frank.localpart)
-
         yield self.store.set_profile_avatar_url(
-            self.u_frank.localpart, "http://my.site/here"
+            self.u_frank.localpart, "http://my.site/here", 1
         )
 
         self.assertEquals(
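The profile setters now take a third positional argument, and the explicit create_profile call is gone. A plausible reading, given this branch's profile-replication work, is that the argument is a batch number and the setters upsert the row; a minimal sketch under those assumptions (table and column names are hypothetical):

    # Sketch only: assumes the third argument is a replication batch number
    # and that the setter upserts, which would explain dropping the separate
    # create_profile() step. Table/column names are hypothetical.
    async def set_profile_displayname(db, user_localpart, new_displayname, batchnum):
        await db.simple_upsert(
            table="profiles",
            keyvalues={"user_id": user_localpart},
            values={"displayname": new_displayname, "batch": batchnum},
            desc="set_profile_displayname",
        )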
diff --git a/tests/test_federation.py b/tests/test_federation.py
index ad165d7295..68684460c6 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -1,6 +1,6 @@
 from mock import Mock
 
-from twisted.internet.defer import maybeDeferred, succeed
+from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed
 
 from synapse.events import FrozenEvent
 from synapse.logging.context import LoggingContext
@@ -70,8 +70,10 @@ class MessageAcceptTests(unittest.TestCase):
         )
 
         # Send the join, it should return None (which is not an error)
-        d = self.handler.on_receive_pdu(
-            "test.serv", join_event, sent_to_us_directly=True
+        d = ensureDeferred(
+            self.handler.on_receive_pdu(
+                "test.serv", join_event, sent_to_us_directly=True
+            )
         )
         self.reactor.advance(1)
         self.assertEqual(self.successResultOf(d), None)
@@ -119,8 +121,10 @@ class MessageAcceptTests(unittest.TestCase):
         )
 
         with LoggingContext(request="lying_event"):
-            d = self.handler.on_receive_pdu(
-                "test.serv", lying_event, sent_to_us_directly=True
+            d = ensureDeferred(
+                self.handler.on_receive_pdu(
+                    "test.serv", lying_event, sent_to_us_directly=True
+                )
             )
 
         # Step the reactor, so the database fetches come back
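on_receive_pdu has evidently become a coroutine function, so the tests wrap the returned coroutine in ensureDeferred, which converts it into a Deferred that successResultOf can inspect once the fake reactor has run. A minimal runnable illustration (the handler here is a stand-in, not the real one):

    from twisted.internet.defer import ensureDeferred

    async def on_receive_pdu(origin, event, sent_to_us_directly=False):
        # Stand-in for the real handler; returns None like a successful join.
        return None

    results = []
    d = ensureDeferred(on_receive_pdu("test.serv", None, sent_to_us_directly=True))
    d.addCallback(results.append)  # fires immediately: nothing was awaited
    assert results == [None]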
diff --git a/tests/test_types.py b/tests/test_types.py
index 9ab5f829b0..7390a1ce62 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -12,23 +12,27 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from six import string_types
 
 from synapse.api.errors import SynapseError
-from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart
+from synapse.types import (
+    GroupID,
+    RoomAlias,
+    UserID,
+    map_username_to_mxid_localpart,
+    strip_invalid_mxid_characters,
+)
 
 from tests import unittest
-from tests.utils import TestHomeServer
 
-mock_homeserver = TestHomeServer(hostname="my.domain")
 
-
-class UserIDTestCase(unittest.TestCase):
+class UserIDTestCase(unittest.HomeserverTestCase):
     def test_parse(self):
-        user = UserID.from_string("@1234abcd:my.domain")
+        user = UserID.from_string("@1234abcd:test")
         self.assertEquals("1234abcd", user.localpart)
-        self.assertEquals("my.domain", user.domain)
-        self.assertEquals(True, mock_homeserver.is_mine(user))
+        self.assertEquals("test", user.domain)
+        self.assertEquals(True, self.hs.is_mine(user))
 
     def test_pase_empty(self):
         with self.assertRaises(SynapseError):
@@ -48,13 +52,13 @@ class UserIDTestCase(unittest.TestCase):
         self.assertTrue(userA != userB)
 
 
-class RoomAliasTestCase(unittest.TestCase):
+class RoomAliasTestCase(unittest.HomeserverTestCase):
     def test_parse(self):
-        room = RoomAlias.from_string("#channel:my.domain")
+        room = RoomAlias.from_string("#channel:test")
         self.assertEquals("channel", room.localpart)
-        self.assertEquals("my.domain", room.domain)
-        self.assertEquals(True, mock_homeserver.is_mine(room))
+        self.assertEquals("test", room.domain)
+        self.assertEquals(True, self.hs.is_mine(room))
 
     def test_build(self):
         room = RoomAlias("channel", "my.domain")
@@ -106,3 +110,16 @@ class MapUsernameTestCase(unittest.TestCase):
         self.assertEqual(
             map_username_to_mxid_localpart("têst".encode("utf-8")), "t=c3=aast"
         )
+
+
+class StripInvalidMxidCharactersTestCase(unittest.TestCase):
+    def test_return_type(self):
+        unstripped = strip_invalid_mxid_characters("test")
+        stripped = strip_invalid_mxid_characters("test@")
+
+        self.assertTrue(isinstance(unstripped, string_types), type(unstripped))
+        self.assertTrue(isinstance(stripped, string_types), type(stripped))
+
+    def test_strip(self):
+        stripped = strip_invalid_mxid_characters("test@")
+        self.assertEqual(stripped, "test", stripped)
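strip_invalid_mxid_characters is new on this branch and its definition isn't shown here; the tests only pin down that it returns a native string and drops characters that aren't valid in an MXID localpart. A re-implementation consistent with those tests, assuming the allowed set is the one the Matrix spec defines for user-ID localparts:

    import re

    # Assumption: anything outside the spec's localpart alphabet
    # (lowercase letters, digits, and ._=-/) is dropped.
    _INVALID_MXID_CHARS = re.compile(r"[^a-z0-9._=\-/]")

    def strip_invalid_mxid_characters(localpart: str) -> str:
        return _INVALID_MXID_CHARS.sub("", localpart)

    assert strip_invalid_mxid_characters("test") == "test"
    assert strip_invalid_mxid_characters("test@") == "test"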
diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py
index 8b8455c8b7..281b32c4b8 100644
--- a/tests/util/test_logcontext.py
+++ b/tests/util/test_logcontext.py
@@ -179,6 +179,30 @@ class LoggingContextTestCase(unittest.TestCase):
         nested_context = nested_logging_context(suffix="bar")
         self.assertEqual(nested_context.request, "foo-bar")
 
+    @defer.inlineCallbacks
+    def test_make_deferred_yieldable_with_await(self):
+        # an async function which returns an incomplete coroutine, but doesn't
+        # follow the synapse rules.
+
+        async def blocking_function():
+            d = defer.Deferred()
+            reactor.callLater(0, d.callback, None)
+            await d
+
+        sentinel_context = LoggingContext.current_context()
+
+        with LoggingContext() as context_one:
+            context_one.request = "one"
+
+            d1 = make_deferred_yieldable(blocking_function())
+            # make sure that the context was reset by make_deferred_yieldable
+            self.assertIs(LoggingContext.current_context(), sentinel_context)
+
+            yield d1
+
+            # now it should be restored
+            self._check_test_key("one")
+
     # a function which returns a deferred which has been "called", but
     # which had a function which returned another incomplete deferred on
diff --git a/tests/util/test_snapshot_cache.py b/tests/util/test_snapshot_cache.py
deleted file mode 100644
index 1a44f72425..0000000000
--- a/tests/util/test_snapshot_cache.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from twisted.internet.defer import Deferred
-
-from synapse.util.caches.snapshot_cache import SnapshotCache
-
-from .. import unittest
-
-
-class SnapshotCacheTestCase(unittest.TestCase):
-    def setUp(self):
-        self.cache = SnapshotCache()
-        self.cache.DURATION_MS = 1
-
-    def test_get_set(self):
-        # Check that getting a missing key returns None
-        self.assertEquals(self.cache.get(0, "key"), None)
-
-        # Check that setting a key with a deferred returns
-        # a deferred that resolves when the initial deferred does
-        d = Deferred()
-        set_result = self.cache.set(0, "key", d)
-        self.assertIsNotNone(set_result)
-        self.assertFalse(set_result.called)
-
-        # Check that getting the key before the deferred has resolved
-        # returns a deferred that resolves when the initial deferred does.
-        get_result_at_10 = self.cache.get(10, "key")
-        self.assertIsNotNone(get_result_at_10)
-        self.assertFalse(get_result_at_10.called)
-
-        # Check that the returned deferreds resolve when the initial deferred
-        # does.
-        d.callback("v")
-        self.assertTrue(set_result.called)
-        self.assertTrue(get_result_at_10.called)
-
-        # Check that getting the key after the deferred has resolved
-        # before the cache expires returns a resolved deferred.
-        get_result_at_11 = self.cache.get(11, "key")
-        self.assertIsNotNone(get_result_at_11)
-        if isinstance(get_result_at_11, Deferred):
-            # The cache may return the actual result rather than a deferred
-            self.assertTrue(get_result_at_11.called)
-
-        # Check that getting the key after the deferred has resolved
-        # after the cache expires returns None
-        get_result_at_12 = self.cache.get(12, "key")
-        self.assertIsNone(get_result_at_12)
diff --git a/tests/utils.py b/tests/utils.py
index c57da59191..585f305b9a 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -260,9 +260,7 @@ def setup_test_homeserver(
         hs = homeserverToUse(
             name,
             config=config,
-            db_config=config.database_config,
             version_string="Synapse/tests",
-            database_engine=db_engine,
             tls_server_context_factory=Mock(),
             tls_client_options_factory=Mock(),
             reactor=reactor,
diff --git a/tox.ini b/tox.ini
index 903a245fb0..10d88e259c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -138,7 +138,7 @@ commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests scripts-dev
 skip_install = True
 deps = towncrier>=18.6.0rc1
 commands =
-   python -m towncrier.check --compare-with=origin/develop
+   python -m towncrier.check --compare-with=origin/dinsic
 basepython = python3.6
 
 [testenv:check-sampleconfig]
@@ -171,11 +171,23 @@ basepython = python3.7
 skip_install = True
 deps =
     {[base]deps}
-    mypy==0.730
+    mypy==0.750
     mypy-zope
 env =
     MYPYPATH = stubs/
 extras = all
 commands = mypy \
+            synapse/config/ \
+            synapse/handlers/ui_auth \
             synapse/logging/ \
-            synapse/config
+            synapse/module_api \
+            synapse/rest/consent \
+            synapse/rest/media/v0 \
+            synapse/rest/saml2 \
+            synapse/spam_checker_api \
+            synapse/storage/engines \
+            synapse/streams
+
+# To find all folders that pass mypy, you can run:
+#
+# find synapse/* -type d -not -name __pycache__ -exec bash -c "mypy '{}' > /dev/null" \; -print