diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
new file mode 100644
index 0000000000..ff05a7428b
--- /dev/null
+++ b/.buildkite/pipeline.yml
@@ -0,0 +1,310 @@
+env:
+ COVERALLS_REPO_TOKEN: wsJWOby6j0uCYFiCes3r0XauxO27mx8lD
+
+steps:
+ - command:
+ - "python -m pip install tox"
+ - "tox -e check_codestyle"
+ label: "\U0001F9F9 Check Style"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ mount-buildkite-agent: false
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e packaging"
+ label: "\U0001F9F9 packaging"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ mount-buildkite-agent: false
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e check_isort"
+ label: "\U0001F9F9 isort"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ mount-buildkite-agent: false
+
+ - command:
+ - "python -m pip install tox"
+ - "scripts-dev/check-newsfragment"
+ label: ":newspaper: Newsfile"
+ branches: "!master !develop !release-*"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ propagate-environment: true
+ mount-buildkite-agent: false
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e check-sampleconfig"
+ label: "\U0001F9F9 check-sample-config"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ mount-buildkite-agent: false
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e mypy"
+ label: ":mypy: mypy"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.5"
+ mount-buildkite-agent: false
+
+ - wait
+
+ - command:
+ - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
+ - "python3.5 -m pip install tox"
+ - "tox -e py35-old,combine"
+ label: ":python: 3.5 / SQLite / Old Deps"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ LANG: "C.UTF-8"
+ plugins:
+ - docker#v3.0.1:
+ image: "ubuntu:xenial" # We use xenial to get an old sqlite and python
+ workdir: "/src"
+ mount-buildkite-agent: false
+ propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py35,combine"
+ label: ":python: 3.5 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.5"
+ workdir: "/src"
+ mount-buildkite-agent: false
+ propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py36,combine"
+ label: ":python: 3.6 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.6"
+ workdir: "/src"
+ mount-buildkite-agent: false
+ propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - command:
+ - "python -m pip install tox"
+ - "tox -e py37,combine"
+ label: ":python: 3.7 / SQLite"
+ env:
+ TRIAL_FLAGS: "-j 2"
+ plugins:
+ - docker#v3.0.1:
+ image: "python:3.7"
+ workdir: "/src"
+ mount-buildkite-agent: false
+ propagate-environment: true
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - label: ":python: 3.5 / :postgres: 9.5"
+ agents:
+ queue: "medium"
+ env:
+ TRIAL_FLAGS: "-j 8"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,combine'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py35.pg95.yaml
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - label: ":python: 3.7 / :postgres: 9.5"
+ agents:
+ queue: "medium"
+ env:
+ TRIAL_FLAGS: "-j 8"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py37.pg95.yaml
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - label: ":python: 3.7 / :postgres: 11"
+ agents:
+ queue: "medium"
+ env:
+ TRIAL_FLAGS: "-j 8"
+ command:
+ - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,combine'"
+ plugins:
+ - docker-compose#v2.1.0:
+ run: testenv
+ config:
+ - .buildkite/docker-compose.py37.pg11.yaml
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - label: "SyTest - :python: 3.5 / SQLite / Monolith"
+ agents:
+ queue: "medium"
+ command:
+ - "bash .buildkite/merge_base_branch.sh"
+ - "bash /synapse_sytest.sh"
+ plugins:
+ - docker#v3.0.1:
+ image: "matrixdotorg/sytest-synapse:py35"
+ propagate-environment: true
+ always-pull: true
+ workdir: "/src"
+ entrypoint: ["/bin/sh", "-e", "-c"]
+ mount-buildkite-agent: false
+ volumes: ["./logs:/logs"]
+ - artifacts#v1.2.0:
+ upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+ - matrix-org/annotate:
+ path: "logs/annotate.md"
+ class: "error"
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
+ agents:
+ queue: "xlarge"
+ env:
+ POSTGRES: "1"
+ command:
+ - "bash .buildkite/merge_base_branch.sh"
+ - "bash /synapse_sytest.sh"
+ plugins:
+ - docker#v3.0.1:
+ image: "matrixdotorg/sytest-synapse:dinsic-py3"
+ propagate-environment: true
+ always-pull: true
+ workdir: "/src"
+ entrypoint: ["/bin/sh", "-e", "-c"]
+ mount-buildkite-agent: false
+ volumes: ["./logs:/logs"]
+ - artifacts#v1.2.0:
+ upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+ - matrix-org/annotate:
+ path: "logs/annotate.md"
+ class: "error"
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - label: "SyTest - :python: 3 / :postgres: 9.6 / Workers"
+ agents:
+ queue: "medium"
+ env:
+ POSTGRES: "1"
+ WORKERS: "1"
+ BLACKLIST: "synapse-blacklist-with-workers"
+ command:
+ - "bash .buildkite/merge_base_branch.sh"
+ - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
+ - "bash /synapse_sytest.sh"
+ plugins:
+ - docker#v3.0.1:
+ image: "matrixdotorg/sytest-synapse:dinsic-py3"
+ propagate-environment: true
+ always-pull: true
+ workdir: "/src"
+ entrypoint: ["/bin/sh", "-e", "-c"]
+ mount-buildkite-agent: false
+ volumes: ["./logs:/logs"]
+ - artifacts#v1.2.0:
+ upload: [ "logs/**/*.log", "logs/**/*.log.*", "logs/coverage.xml" ]
+ - matrix-org/annotate:
+ path: "logs/annotate.md"
+ class: "error"
+ - matrix-org/coveralls#v1.0:
+ parallel: "true"
+ retry:
+ automatic:
+ - exit_status: -1
+ limit: 2
+ - exit_status: 2
+ limit: 2
+
+ - wait: ~
+ continue_on_failure: true
+
+ - label: Trigger webhook
+ command: "curl -k https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN -d \"payload[build_num]=$BUILDKITE_BUILD_NUMBER&payload[status]=done\""
diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
index 158ab79154..094b6c94da 100644
--- a/.buildkite/worker-blacklist
+++ b/.buildkite/worker-blacklist
@@ -39,3 +39,5 @@ Server correctly handles incoming m.device_list_update
# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536
Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state
+
+Can get rooms/{roomId}/members at a given point
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5736ede6c4..4b01b6ac8c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -200,6 +200,20 @@ Git allows you to add this signoff automatically when using the `-s`
flag to `git commit`, which uses the name and email set in your
`user.name` and `user.email` git configs.
+## Merge Strategy
+
+We use the commit history of develop/master extensively to identify
+when regressions were introduced and what changes have been made.
+
+We aim to have a clean merge history, which means we normally squash-merge
+changes into develop. For small changes this means there is no need to rebase
+to clean up your PR before merging. Larger changes with an organised set of
+commits may be merged as-is, if the history is judged to be useful.
+
+This use of squash-merging will mean PRs built on each other will be hard to
+merge. We suggest avoiding these where possible, and if required, ensuring
+each PR has a tidy set of commits to ease merging.
+
## Conclusion
That's it! Matrix is a very open and collaborative project as you might expect
diff --git a/MANIFEST.in b/MANIFEST.in
index 156d6f04f7..5eb8e62d34 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,5 @@
include synctl
+include sytest-blacklist
include LICENSE
include VERSION
include *.rst
@@ -50,3 +51,11 @@ prune docker
prune mypy.ini
prune snap
prune stubs
+
+exclude jenkins*
+recursive-exclude jenkins *.sh
+
+# FIXME: we shouldn't have these templates here
+recursive-include res/templates-dinsic *.css
+recursive-include res/templates-dinsic *.html
+recursive-include res/templates-dinsic *.txt
diff --git a/README.rst b/README.rst
index 2691dfc23d..4db7d17e94 100644
--- a/README.rst
+++ b/README.rst
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
virtualenv -p python3 env
source env/bin/activate
- python -m pip install --no-use-pep517 -e .[all]
+ python -m pip install --no-use-pep517 -e ".[all]"
This will run a process of downloading and installing all the needed
dependencies into a virtual env.
diff --git a/changelog.d/1.feature b/changelog.d/1.feature
new file mode 100644
index 0000000000..845642e445
--- /dev/null
+++ b/changelog.d/1.feature
@@ -0,0 +1 @@
+Forbid changing the name, avatar or topic of a direct room.
diff --git a/changelog.d/10.bugfix b/changelog.d/10.bugfix
new file mode 100644
index 0000000000..51f89f46dd
--- /dev/null
+++ b/changelog.d/10.bugfix
@@ -0,0 +1 @@
+Don't apply retention policy based filtering on state events.
diff --git a/changelog.d/11.feature b/changelog.d/11.feature
new file mode 100644
index 0000000000..362e4b1efd
--- /dev/null
+++ b/changelog.d/11.feature
@@ -0,0 +1 @@
+Allow server admins to configure a custom global rate-limiting for third party invites.
\ No newline at end of file
diff --git a/changelog.d/12.feature b/changelog.d/12.feature
new file mode 100644
index 0000000000..8e6e7a28af
--- /dev/null
+++ b/changelog.d/12.feature
@@ -0,0 +1 @@
+Add a `/user/:user_id/info` CS servlet to give user deactivated/expired information.
\ No newline at end of file
diff --git a/changelog.d/13.feature b/changelog.d/13.feature
new file mode 100644
index 0000000000..c2d2e93abf
--- /dev/null
+++ b/changelog.d/13.feature
@@ -0,0 +1 @@
+Hide expired users from the user directory, and optionally re-add them on renewal.
\ No newline at end of file
diff --git a/changelog.d/14.feature b/changelog.d/14.feature
new file mode 100644
index 0000000000..020d0bac1e
--- /dev/null
+++ b/changelog.d/14.feature
@@ -0,0 +1 @@
+User display names now have capitalised letters after '-' symbols.
\ No newline at end of file
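A minimal sketch of the capitalisation behaviour this entry describes; the helper name is hypothetical, not the actual implementation in Synapse:

```python
def capitalise_after_hyphens(display_name: str) -> str:
    # Illustrative only: "jean-pierre" -> "Jean-Pierre".
    return "-".join(part[:1].upper() + part[1:] for part in display_name.split("-"))
```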
diff --git a/changelog.d/15.misc b/changelog.d/15.misc
new file mode 100644
index 0000000000..4cc4a5175f
--- /dev/null
+++ b/changelog.d/15.misc
@@ -0,0 +1 @@
+Fix the ordering on `scripts/generate_signing_key.py`'s import statement.
diff --git a/changelog.d/17.misc b/changelog.d/17.misc
new file mode 100644
index 0000000000..58120ab5c7
--- /dev/null
+++ b/changelog.d/17.misc
@@ -0,0 +1 @@
+Blacklist some flaky sytests until they're fixed.
\ No newline at end of file
diff --git a/changelog.d/18.feature b/changelog.d/18.feature
new file mode 100644
index 0000000000..f5aa29a6e8
--- /dev/null
+++ b/changelog.d/18.feature
@@ -0,0 +1 @@
+Add option `limit_profile_requests_to_known_users` to remove the requirement that a user share a room with another user in order to query their profile information.
\ No newline at end of file
diff --git a/changelog.d/19.feature b/changelog.d/19.feature
new file mode 100644
index 0000000000..95a44a4a89
--- /dev/null
+++ b/changelog.d/19.feature
@@ -0,0 +1 @@
+Add `max_avatar_size` and `allowed_avatar_mimetypes` to restrict the size of user avatars and their file type respectively.
\ No newline at end of file
diff --git a/changelog.d/2.bugfix b/changelog.d/2.bugfix
new file mode 100644
index 0000000000..4fe5691468
--- /dev/null
+++ b/changelog.d/2.bugfix
@@ -0,0 +1 @@
+Don't treat 3PID revocation as a new 3PID invite.
diff --git a/changelog.d/20.bugfix b/changelog.d/20.bugfix
new file mode 100644
index 0000000000..8ba53c28f9
--- /dev/null
+++ b/changelog.d/20.bugfix
@@ -0,0 +1 @@
+Validate `client_secret` parameter against the regex provided by the C-S spec.
\ No newline at end of file
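For reference, a sketch of the validation this entry refers to: the C-S spec restricts `client_secret` to the character class `[0-9a-zA-Z.=_-]`, non-empty and at most 255 characters. The helper name here is illustrative:

```python
import re

CLIENT_SECRET_REGEX = re.compile(r"^[0-9a-zA-Z.=_-]+$")

def is_valid_client_secret(client_secret: str) -> bool:
    # Non-empty, at most 255 characters, limited character set per the C-S spec.
    return len(client_secret) <= 255 and bool(CLIENT_SECRET_REGEX.match(client_secret))
```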
diff --git a/changelog.d/21.bugfix b/changelog.d/21.bugfix
new file mode 100644
index 0000000000..630d7812f7
--- /dev/null
+++ b/changelog.d/21.bugfix
@@ -0,0 +1 @@
+Fix resetting user passwords via a phone number.
diff --git a/changelog.d/3.bugfix b/changelog.d/3.bugfix
new file mode 100644
index 0000000000..cc4bcefa80
--- /dev/null
+++ b/changelog.d/3.bugfix
@@ -0,0 +1 @@
+Fix encoding on password reset HTML responses in Python 2.
diff --git a/changelog.d/4.bugfix b/changelog.d/4.bugfix
new file mode 100644
index 0000000000..fe717920a6
--- /dev/null
+++ b/changelog.d/4.bugfix
@@ -0,0 +1 @@
+Fix handling of filtered strings in Python 3.
diff --git a/changelog.d/5.bugfix b/changelog.d/5.bugfix
new file mode 100644
index 0000000000..53f57f46ca
--- /dev/null
+++ b/changelog.d/5.bugfix
@@ -0,0 +1 @@
+Fix room retention policy management in worker mode.
diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature
new file mode 100644
index 0000000000..2ffdd37eef
--- /dev/null
+++ b/changelog.d/5083.feature
@@ -0,0 +1 @@
+Add `auth_profile_reqs` option to require an `access_token` for GET requests to /profile endpoints on the CS API.
diff --git a/changelog.d/5098.misc b/changelog.d/5098.misc
new file mode 100644
index 0000000000..9cd83bf226
--- /dev/null
+++ b/changelog.d/5098.misc
@@ -0,0 +1 @@
+Add workarounds for pep-517 install errors.
diff --git a/changelog.d/5214.feature b/changelog.d/5214.feature
new file mode 100644
index 0000000000..6c0f15c901
--- /dev/null
+++ b/changelog.d/5214.feature
@@ -0,0 +1 @@
+Allow server admins to define and enforce a password policy (MSC2000).
diff --git a/changelog.d/5416.misc b/changelog.d/5416.misc
new file mode 100644
index 0000000000..155e8c7cd3
--- /dev/null
+++ b/changelog.d/5416.misc
@@ -0,0 +1 @@
+Add unique index to the profile_replication_status table.
diff --git a/changelog.d/5420.feature b/changelog.d/5420.feature
new file mode 100644
index 0000000000..745864b903
--- /dev/null
+++ b/changelog.d/5420.feature
@@ -0,0 +1 @@
+Add configuration option to hide new users from the user directory.
diff --git a/changelog.d/5610.feature b/changelog.d/5610.feature
new file mode 100644
index 0000000000..b99514f97e
--- /dev/null
+++ b/changelog.d/5610.feature
@@ -0,0 +1 @@
+Implement new custom event rules for power levels.
diff --git a/changelog.d/5702.bugfix b/changelog.d/5702.bugfix
new file mode 100644
index 0000000000..43b6e39b13
--- /dev/null
+++ b/changelog.d/5702.bugfix
@@ -0,0 +1 @@
+Fix 3PID invite to invite association detection in the Tchap room access rules.
diff --git a/changelog.d/5760.feature b/changelog.d/5760.feature
new file mode 100644
index 0000000000..90302d793e
--- /dev/null
+++ b/changelog.d/5760.feature
@@ -0,0 +1 @@
+Force the access rule to be "restricted" if the join rule is "public".
diff --git a/changelog.d/6.bugfix b/changelog.d/6.bugfix
new file mode 100644
index 0000000000..43ab65cc95
--- /dev/null
+++ b/changelog.d/6.bugfix
@@ -0,0 +1 @@
+Don't forbid membership events whose membership isn't 'join' or 'invite' in restricted rooms, so that users who got into these rooms before the access rules started to be enforced can leave them.
diff --git a/changelog.d/6769.feature b/changelog.d/6769.feature
new file mode 100644
index 0000000000..8a60e12907
--- /dev/null
+++ b/changelog.d/6769.feature
@@ -0,0 +1 @@
+Admin API to add or modify threepids of user accounts.
\ No newline at end of file
diff --git a/changelog.d/6781.bugfix b/changelog.d/6781.bugfix
new file mode 100644
index 0000000000..47cd671bff
--- /dev/null
+++ b/changelog.d/6781.bugfix
@@ -0,0 +1 @@
+Fixed third party event rules function `on_create_room`'s return value being ignored.
diff --git a/changelog.d/6821.misc b/changelog.d/6821.misc
new file mode 100644
index 0000000000..1d5265d5e2
--- /dev/null
+++ b/changelog.d/6821.misc
@@ -0,0 +1 @@
+Add type hints to `SyncHandler`.
diff --git a/changelog.d/6823.misc b/changelog.d/6823.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6823.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6825.bugfix b/changelog.d/6825.bugfix
new file mode 100644
index 0000000000..d3cacd6d9a
--- /dev/null
+++ b/changelog.d/6825.bugfix
@@ -0,0 +1 @@
+Allow URL-encoded User IDs on `/_synapse/admin/v2/users/<user_id>[/admin]` endpoints. Thanks to @NHAS for reporting.
\ No newline at end of file
diff --git a/changelog.d/6827.misc b/changelog.d/6827.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6827.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6833.misc b/changelog.d/6833.misc
new file mode 100644
index 0000000000..8a0605f90b
--- /dev/null
+++ b/changelog.d/6833.misc
@@ -0,0 +1 @@
+Reduce the log level of `synapse.storage.TIME` to DEBUG.
diff --git a/changelog.d/6836.misc b/changelog.d/6836.misc
new file mode 100644
index 0000000000..232488e1e5
--- /dev/null
+++ b/changelog.d/6836.misc
@@ -0,0 +1 @@
+Fix stacktraces when using `ObservableDeferred` and async/await.
diff --git a/changelog.d/6837.misc b/changelog.d/6837.misc
new file mode 100644
index 0000000000..0496f12de8
--- /dev/null
+++ b/changelog.d/6837.misc
@@ -0,0 +1 @@
+Port much of `synapse.handlers.federation` to async/await.
diff --git a/changelog.d/6840.misc b/changelog.d/6840.misc
new file mode 100644
index 0000000000..0496f12de8
--- /dev/null
+++ b/changelog.d/6840.misc
@@ -0,0 +1 @@
+Port much of `synapse.handlers.federation` to async/await.
diff --git a/changelog.d/6846.doc b/changelog.d/6846.doc
new file mode 100644
index 0000000000..ad69d608c0
--- /dev/null
+++ b/changelog.d/6846.doc
@@ -0,0 +1 @@
+Add details of PR merge strategy to contributing docs.
\ No newline at end of file
diff --git a/changelog.d/6847.misc b/changelog.d/6847.misc
new file mode 100644
index 0000000000..094e911adb
--- /dev/null
+++ b/changelog.d/6847.misc
@@ -0,0 +1 @@
+Populate `rooms.room_version` database column at startup, rather than in a background update.
diff --git a/changelog.d/6849.bugfix b/changelog.d/6849.bugfix
new file mode 100644
index 0000000000..d928a26ec6
--- /dev/null
+++ b/changelog.d/6849.bugfix
@@ -0,0 +1 @@
+Fix Synapse refusing to start if `federation_certificate_verification_whitelist` option is blank.
diff --git a/changelog.d/6854.misc b/changelog.d/6854.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6854.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6855.misc b/changelog.d/6855.misc
new file mode 100644
index 0000000000..904361ddfb
--- /dev/null
+++ b/changelog.d/6855.misc
@@ -0,0 +1 @@
+Update pip install directions in the readme to avoid an error when using zsh.
diff --git a/changelog.d/6856.misc b/changelog.d/6856.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6856.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6857.misc b/changelog.d/6857.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6857.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6858.misc b/changelog.d/6858.misc
new file mode 100644
index 0000000000..08aa80bcd9
--- /dev/null
+++ b/changelog.d/6858.misc
@@ -0,0 +1 @@
+Refactoring work in preparation for changing the event redaction algorithm.
diff --git a/changelog.d/6862.misc b/changelog.d/6862.misc
new file mode 100644
index 0000000000..83626d2939
--- /dev/null
+++ b/changelog.d/6862.misc
@@ -0,0 +1 @@
+Reduce amount we log at `INFO` level.
diff --git a/changelog.d/6864.misc b/changelog.d/6864.misc
new file mode 100644
index 0000000000..d24eb68460
--- /dev/null
+++ b/changelog.d/6864.misc
@@ -0,0 +1 @@
+Limit the number of events that can be requested by the backfill federation API to 100.
diff --git a/changelog.d/6866.feature b/changelog.d/6866.feature
new file mode 100644
index 0000000000..256feab6ff
--- /dev/null
+++ b/changelog.d/6866.feature
@@ -0,0 +1 @@
+Add ability to run some group APIs on workers.
diff --git a/changelog.d/6869.misc b/changelog.d/6869.misc
new file mode 100644
index 0000000000..14f88f9bb7
--- /dev/null
+++ b/changelog.d/6869.misc
@@ -0,0 +1 @@
+Remove unused `get_room_stats_state` method.
diff --git a/changelog.d/6871.misc b/changelog.d/6871.misc
new file mode 100644
index 0000000000..5161af9983
--- /dev/null
+++ b/changelog.d/6871.misc
@@ -0,0 +1 @@
+Add typing to `synapse.federation.sender` and port to async/await.
diff --git a/changelog.d/6873.feature b/changelog.d/6873.feature
new file mode 100644
index 0000000000..bbedf8f7f0
--- /dev/null
+++ b/changelog.d/6873.feature
@@ -0,0 +1 @@
+Add ability to route federation user device queries to workers.
diff --git a/changelog.d/6882.misc b/changelog.d/6882.misc
new file mode 100644
index 0000000000..e8382e36ae
--- /dev/null
+++ b/changelog.d/6882.misc
@@ -0,0 +1 @@
+Reject device display names over 100 characters in length.
diff --git a/changelog.d/6883.misc b/changelog.d/6883.misc
new file mode 100644
index 0000000000..e0837d7987
--- /dev/null
+++ b/changelog.d/6883.misc
@@ -0,0 +1 @@
+Add an additional entry to the SyTest blacklist for worker mode.
diff --git a/changelog.d/9.misc b/changelog.d/9.misc
new file mode 100644
index 0000000000..24fd12c978
--- /dev/null
+++ b/changelog.d/9.misc
@@ -0,0 +1 @@
+Add SyTest to the BuildKite CI.
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
deleted file mode 100644
index 5d42b3464f..0000000000
--- a/contrib/systemd/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Setup Synapse with Systemd
-This is a setup for managing synapse with a user contributed systemd unit
-file. It provides a `matrix-synapse` systemd unit file that should be tailored
-to accommodate your installation in accordance with the installation
-instructions provided in [installation instructions](../../INSTALL.md).
-
-## Setup
-1. Under the service section, ensure the `User` variable matches which user
-you installed synapse under and wish to run it as.
-2. Under the service section, ensure the `WorkingDirectory` variable matches
-where you have installed synapse.
-3. Under the service section, ensure the `ExecStart` variable matches the
-appropriate locations of your installation.
-4. Copy the `matrix-synapse.service` to `/etc/systemd/system/`
-5. Start Synapse: `sudo systemctl start matrix-synapse`
-6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
-7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index 0b3d09d694..6b02d963e6 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -2,7 +2,8 @@ Create or modify Account
========================
This API allows an administrator to create or modify a user account with a
-specific ``user_id``.
+specific ``user_id``. Be aware that ``user_id`` is fully qualified: for example,
+``@user:server.com``.
This api is::
@@ -15,6 +16,16 @@ with a body of:
{
"password": "user_password",
"displayname": "User",
+ "threepids": [
+ {
+ "medium": "email",
+ "address": "<user_mail_1>"
+ },
+ {
+ "medium": "email",
+ "address": "<user_mail_2>"
+ }
+ ],
"avatar_url": "<avatar_url>",
"admin": false,
"deactivated": false
@@ -23,6 +34,7 @@ with a body of:
including an ``access_token`` of a server admin.
The parameter ``displayname`` is optional and defaults to ``user_id``.
+The parameter ``threepids`` is optional.
The parameter ``avatar_url`` is optional.
The parameter ``admin`` is optional and defaults to 'false'.
The parameter ``deactivated`` is optional and defaults to 'false'.
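A usage sketch of the documented request, assuming the `/_synapse/admin/v2/users/<user_id>` endpoint path referenced elsewhere in this patch; the server URL, token, and addresses are placeholders:

```python
import requests

BASE_URL = "https://synapse.example.com"  # hypothetical homeserver
ADMIN_TOKEN = "ADMIN_ACCESS_TOKEN"        # access token of a server admin
user_id = "@user:server.com"              # fully-qualified user ID

resp = requests.put(
    BASE_URL + "/_synapse/admin/v2/users/" + user_id,
    headers={"Authorization": "Bearer " + ADMIN_TOKEN},
    json={
        "password": "user_password",
        "displayname": "User",
        "threepids": [
            {"medium": "email", "address": "user1@example.com"},
            {"medium": "email", "address": "user2@example.com"},
        ],
        "admin": False,
        "deactivated": False,
    },
)
resp.raise_for_status()
```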
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 8e8cf513b0..66f8fec082 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -323,6 +323,74 @@ listeners:
#
#allow_per_room_profiles: false
+# Whether to show the users on this homeserver in the user directory. Defaults to
+# 'true'.
+#
+#show_users_in_user_directory: false
+
+# Message retention policy at the server level.
+#
+# Room admins and mods can define a retention period for their rooms using the
+# 'm.room.retention' state event, and server admins can cap this period by setting
+# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+#
+# If this feature is enabled, Synapse will regularly look for and purge events
+# which are older than the room's maximum retention period. Synapse will also
+# filter events received over federation so that events that should have been
+# purged are ignored and not stored again.
+#
+retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+ # Retention policy limits. If set, a user won't be able to send a
+ # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+ # that's not within this range. This is especially useful in closed federations,
+ # in which server admins can make sure every federating server applies the same
+ # rules.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+ # Server admins can define the settings of the background jobs purging the
+ # events whose lifetime has expired under the 'purge_jobs' section.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+ # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a very frequent basis (e.g. every 5min), without that
+ # purge being performed by a job that's iterating over every room it knows,
+ # which would be quite heavy on the server.
+ #
+ #purge_jobs:
+ # - shortest_max_lifetime: 1d
+ # longest_max_lifetime: 3d
+ # interval: 5m
+ # - shortest_max_lifetime: 3d
+ # longest_max_lifetime: 1y
+ # interval: 24h
+
# How long to keep redacted events in unredacted form in the database. After
# this period redacted events get replaced with their redacted form in the DB.
#
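The purge job ranges described in the retention section above can be summed up in a small sketch (an illustrative helper, not Synapse's purge code): with `shortest_max_lifetime: 2d` and `longest_max_lifetime: 3d`, a room qualifies when its `max_lifetime` is strictly greater than 2 days and at most 3 days.

```python
from typing import Optional

def job_covers_room(
    max_lifetime_ms: int,
    shortest_max_lifetime_ms: Optional[int] = None,
    longest_max_lifetime_ms: Optional[int] = None,
) -> bool:
    """Whether a purge job with range (shortest, longest] handles a room."""
    if shortest_max_lifetime_ms is not None and max_lifetime_ms <= shortest_max_lifetime_ms:
        return False
    if longest_max_lifetime_ms is not None and max_lifetime_ms > longest_max_lifetime_ms:
        return False
    return True
```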
@@ -604,6 +672,8 @@ log_config: "CONFDIR/SERVERNAME.log.config"
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
+# - one that ratelimits third-party invite requests based on the account
+# that's making the requests.
# - one for ratelimiting redactions by room admins. If this is not explicitly
# set then it uses the same ratelimiting as per rc_message. This is useful
# to allow room admins to deal with abuse quickly.
@@ -629,6 +699,10 @@ log_config: "CONFDIR/SERVERNAME.log.config"
# per_second: 0.17
# burst_count: 3
#
+#rc_third_party_invite:
+# per_second: 0.2
+# burst_count: 10
+#
#rc_admin_redaction:
# per_second: 1
# burst_count: 50
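As a rough sketch of the `per_second`/`burst_count` semantics (a token bucket; this is illustrative, not Synapse's actual `Ratelimiter`): with `per_second: 0.2` and `burst_count: 10`, a client can send 10 invites immediately, then one every 5 seconds.

```python
import time

class TokenBucket:
    """Illustrative token bucket matching per_second/burst_count semantics."""

    def __init__(self, per_second: float, burst_count: int):
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = float(burst_count)
        self.last = time.monotonic()

    def can_do_action(self) -> bool:
        now = time.monotonic()
        # Refill at `per_second` tokens per second, capped at `burst_count`.
        self.tokens = min(self.burst_count, self.tokens + (now - self.last) * self.per_second)
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False
```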
@@ -696,6 +770,30 @@ media_store_path: "DATADIR/media_store"
#
#max_upload_size: 10M
+# The largest allowed size for a user avatar. If not defined, no
+# restriction will be imposed.
+#
+# Note that this only applies when an avatar is changed globally.
+# Per-room avatar changes are not affected. See allow_per_room_profiles
+# for disabling that functionality.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's local media repo.
+#
+#max_avatar_size: 10M
+
+# Allowed mimetypes for a user avatar. If not defined, no restriction will
+# be imposed.
+#
+# Note that this only applies when an avatar is changed globally.
+# Per-room avatar changes are not affected. See allow_per_room_profiles
+# for disabling that functionality.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's local media repo.
+#
+#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
# Maximum number of pixels that will be thumbnailed
#
#max_image_pixels: 32M
@@ -956,9 +1054,32 @@ account_validity:
#
#disable_msisdn_registration: true
+# Derive the user's matrix ID from a type of 3PID used when registering.
+# This overrides any matrix ID the user proposes when calling /register.
+# The 3PID type should be present in registrations_require_3pid to avoid
+# users failing to register if they don't specify the right kind of 3pid.
+#
+#register_mxid_from_3pid: email
+
+# Uncomment to set the display name of new users to their email address,
+# rather than using the default heuristic.
+#
+#register_just_use_email_for_display_name: true
+
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
+# Use an Identity Server to establish which 3PIDs are allowed to register.
+# Overrides allowed_local_3pids below.
+#
+#check_is_for_allowed_local_3pids: matrix.org
+#
+# If you are using an IS you can also check whether that IS registers
+# pending invites for the given 3PID (and then allow it to sign up on
+# the platform):
+#
+#allow_invited_3pids: False
+#
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\.org'
@@ -967,6 +1088,11 @@ account_validity:
# - medium: msisdn
# pattern: '\+44'
+# If true, stop users from trying to change the 3PIDs associated with
+# their accounts.
+#
+#disable_3pid_changes: False
+
# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -1016,6 +1142,30 @@ account_validity:
# - matrix.org
# - vector.im
+# If enabled, user IDs, display names and avatar URLs will be replicated
+# to this server whenever they change.
+# This is an experimental API currently implemented by sydent to support
+# cross-homeserver user directories.
+#
+#replicate_user_profiles_to: example.com
+
+# If specified, attempt to replay registrations, profile changes & 3pid
+# bindings on the given target homeserver via the AS API. The HS is authed
+# via a given AS token.
+#
+#shadow_server:
+# hs_url: https://shadow.example.com
+# hs: shadow.example.com
+# as_token: 12u394refgbdhivsia
+
+# If enabled, don't let users set their own display names/avatars
+# other than for the very first time (unless they are a server admin).
+# Useful when provisioning users based on the contents of a 3rd party
+# directory and to avoid ambiguities.
+#
+#disable_set_displayname: False
+#disable_set_avatar_url: False
+
# Handle threepid (email/phone etc) registration and password resets through a set of
# *trusted* identity servers. Note that this allows the configured identity server to
# reset passwords for accounts!
@@ -1369,6 +1519,36 @@ password_config:
#
#pepper: "EVEN_MORE_SECRET"
+ # Define and enforce a password policy. Each parameter is optional, boolean
+ # parameters default to 'false' and integer parameters default to 0.
+ # This is an early implementation of MSC2000.
+ #
+ #policy:
+ # Whether to enforce the password policy.
+ #
+ #enabled: true
+
+ # Minimum accepted length for a password.
+ #
+ #minimum_length: 15
+
+ # Whether a password must contain at least one digit.
+ #
+ #require_digit: true
+
+ # Whether a password must contain at least one symbol.
+ # A symbol is any character that's not a number or a letter.
+ #
+ #require_symbol: true
+
+ # Whether a password must contain at least one lowercase letter.
+ #
+ #require_lowercase: true
+
+ # Whether a password must contain at least one uppercase letter.
+ #
+ #require_uppercase: true
+
# Configuration for sending emails from Synapse.
#
@@ -1541,6 +1721,11 @@ email:
#user_directory:
# enabled: true
# search_all_users: false
+#
+# # If this is set, user search will be delegated to this ID server instead
+# # of synapse performing the search itself.
+# # This is an experimental API.
+# defer_to_id_server: https://id.example.com
# User Consent configuration
diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py
index ca4b879526..5c5a115ca9 100644
--- a/docs/sphinx/conf.py
+++ b/docs/sphinx/conf.py
@@ -12,8 +12,8 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys
import os
+import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -191,11 +191,11 @@ htmlhelp_basename = "Synapsedoc"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
- #'papersize': 'letterpaper',
+ # 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
- #'pointsize': '10pt',
+ # 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
- #'preamble': '',
+ # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
diff --git a/docs/workers.md b/docs/workers.md
index 09a9d8a7b8..6f7ec58780 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -176,9 +176,15 @@ endpoints matching the following regular expressions:
^/_matrix/federation/v1/query_auth/
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
+ ^/_matrix/federation/v1/user/devices/
^/_matrix/federation/v1/send/
+ ^/_matrix/federation/v1/get_groups_publicised$
^/_matrix/key/v2/query
+Additionally, the following REST endpoints can be handled for GET requests:
+
+ ^/_matrix/federation/v1/groups/
+
The above endpoints should all be routed to the federation_reader worker by the
reverse-proxy configuration.
@@ -254,10 +260,13 @@ following regular expressions:
^/_matrix/client/(api/v1|r0|unstable)/keys/changes$
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$
+ ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
+ ^/_matrix/client/(api/v1|r0|unstable)/get_groups_publicised$
Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$
+ ^/_matrix/client/(api/v1|r0|unstable)/groups/.*$
Additionally, the following REST endpoints can be handled, but all requests must
be routed to the same instance:
diff --git a/res/templates-dinsic/mail-Vector.css b/res/templates-dinsic/mail-Vector.css
new file mode 100644
index 0000000000..6a3e36eda1
--- /dev/null
+++ b/res/templates-dinsic/mail-Vector.css
@@ -0,0 +1,7 @@
+.header {
+ border-bottom: 4px solid #e4f7ed ! important;
+}
+
+.notif_link a, .footer a {
+ color: #76CFA6 ! important;
+}
diff --git a/res/templates-dinsic/mail.css b/res/templates-dinsic/mail.css
new file mode 100644
index 0000000000..5ab3e1b06d
--- /dev/null
+++ b/res/templates-dinsic/mail.css
@@ -0,0 +1,156 @@
+body {
+ margin: 0px;
+}
+
+pre, code {
+ word-break: break-word;
+ white-space: pre-wrap;
+}
+
+#page {
+ font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
+ color: #454545;
+ font-size: 12pt;
+ width: 100%;
+ padding: 20px;
+}
+
+#inner {
+ width: 640px;
+}
+
+.header {
+ width: 100%;
+ height: 87px;
+ color: #454545;
+ border-bottom: 4px solid #e5e5e5;
+}
+
+.logo {
+ text-align: right;
+ margin-left: 20px;
+}
+
+.salutation {
+ padding-top: 10px;
+ font-weight: bold;
+}
+
+.summarytext {
+}
+
+.room {
+ width: 100%;
+ color: #454545;
+ border-bottom: 1px solid #e5e5e5;
+}
+
+.room_header td {
+ padding-top: 38px;
+ padding-bottom: 10px;
+ border-bottom: 1px solid #e5e5e5;
+}
+
+.room_name {
+ vertical-align: middle;
+ font-size: 18px;
+ font-weight: bold;
+}
+
+.room_header h2 {
+ margin-top: 0px;
+ margin-left: 75px;
+ font-size: 20px;
+}
+
+.room_avatar {
+ width: 56px;
+ line-height: 0px;
+ text-align: center;
+ vertical-align: middle;
+}
+
+.room_avatar img {
+ width: 48px;
+ height: 48px;
+ object-fit: cover;
+ border-radius: 24px;
+}
+
+.notif {
+ border-bottom: 1px solid #e5e5e5;
+ margin-top: 16px;
+ padding-bottom: 16px;
+}
+
+.historical_message .sender_avatar {
+ opacity: 0.3;
+}
+
+/* spell out opacity and historical_message class names for Outlook aka Word */
+.historical_message .sender_name {
+ color: #e3e3e3;
+}
+
+.historical_message .message_time {
+ color: #e3e3e3;
+}
+
+.historical_message .message_body {
+ color: #c7c7c7;
+}
+
+.historical_message td,
+.message td {
+ padding-top: 10px;
+}
+
+.sender_avatar {
+ width: 56px;
+ text-align: center;
+ vertical-align: top;
+}
+
+.sender_avatar img {
+ margin-top: -2px;
+ width: 32px;
+ height: 32px;
+ border-radius: 16px;
+}
+
+.sender_name {
+ display: inline;
+ font-size: 13px;
+ color: #a2a2a2;
+}
+
+.message_time {
+ text-align: right;
+ width: 100px;
+ font-size: 11px;
+ color: #a2a2a2;
+}
+
+.message_body {
+}
+
+.notif_link td {
+ padding-top: 10px;
+ padding-bottom: 10px;
+ font-weight: bold;
+}
+
+.notif_link a, .footer a {
+ color: #454545;
+ text-decoration: none;
+}
+
+.debug {
+ font-size: 10px;
+ color: #888;
+}
+
+.footer {
+ margin-top: 20px;
+ text-align: center;
+}
\ No newline at end of file
diff --git a/res/templates-dinsic/notif.html b/res/templates-dinsic/notif.html
new file mode 100644
index 0000000000..bcdfeea9da
--- /dev/null
+++ b/res/templates-dinsic/notif.html
@@ -0,0 +1,45 @@
+{% for message in notif.messages %}
+ <tr class="{{ "historical_message" if message.is_historical else "message" }}">
+ <td class="sender_avatar">
+ {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
+ {% if message.sender_avatar_url %}
+ <img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
+ {% else %}
+ {% if message.sender_hash % 3 == 0 %}
+ <img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
+ {% elif message.sender_hash % 3 == 1 %}
+ <img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
+ {% else %}
+ <img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
+ {% endif %}
+ {% endif %}
+ {% endif %}
+ </td>
+ <td class="message_contents">
+ {% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
+ <div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
+ {% endif %}
+ <div class="message_body">
+ {% if message.msgtype == "m.text" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.emote" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.notice" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.image" %}
+ <img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
+ {% elif message.msgtype == "m.file" %}
+ <span class="filename">{{ message.body_text_plain }}</span>
+ {% endif %}
+ </div>
+ </td>
+ <td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
+ </tr>
+{% endfor %}
+<tr class="notif_link">
+ <td></td>
+ <td>
+ <a href="{{ notif.link }}">Voir {{ room.title }}</a>
+ </td>
+ <td></td>
+</tr>
diff --git a/res/templates-dinsic/notif.txt b/res/templates-dinsic/notif.txt
new file mode 100644
index 0000000000..3dff1bb570
--- /dev/null
+++ b/res/templates-dinsic/notif.txt
@@ -0,0 +1,16 @@
+{% for message in notif.messages %}
+{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
+{% if message.msgtype == "m.text" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.emote" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.notice" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.image" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.file" %}
+{{ message.body_text_plain }}
+{% endif %}
+{% endfor %}
+
+Voir {{ room.title }} à {{ notif.link }}
diff --git a/res/templates-dinsic/notif_mail.html b/res/templates-dinsic/notif_mail.html
new file mode 100644
index 0000000000..1e1efa74b2
--- /dev/null
+++ b/res/templates-dinsic/notif_mail.html
@@ -0,0 +1,55 @@
+<!doctype html>
+<html lang="en">
+ <head>
+ <style type="text/css">
+ {% include 'mail.css' without context %}
+ {% include "mail-%s.css" % app_name ignore missing without context %}
+ </style>
+ </head>
+ <body>
+ <table id="page">
+ <tr>
+ <td> </td>
+ <td id="inner">
+ <table class="header">
+ <tr>
+ <td>
+ <div class="salutation">Bonjour {{ user_display_name }},</div>
+ <div class="summarytext">{{ summary_text }}</div>
+ </td>
+ <td class="logo">
+ {% if app_name == "Riot" %}
+ <img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
+ {% elif app_name == "Vector" %}
+ <img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
+ {% else %}
+ <img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
+ {% endif %}
+ </td>
+ </tr>
+ </table>
+ {% for room in rooms %}
+ {% include 'room.html' with context %}
+ {% endfor %}
+ <div class="footer">
+ <a href="{{ unsubscribe_link }}">Se désinscrire</a>
+ <br/>
+ <br/>
+ <div class="debug">
+ Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
+ an event was received at {{ reason.received_at|format_ts("%c") }}
+ which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
+ {% if reason.last_sent_ts %}
+ and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
+ which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
+ {% else %}
+ and we don't have a last time we sent a mail for this room.
+ {% endif %}
+ </div>
+ </div>
+ </td>
+ <td> </td>
+ </tr>
+ </table>
+ </body>
+</html>
diff --git a/res/templates-dinsic/notif_mail.txt b/res/templates-dinsic/notif_mail.txt
new file mode 100644
index 0000000000..fae877426f
--- /dev/null
+++ b/res/templates-dinsic/notif_mail.txt
@@ -0,0 +1,10 @@
+Bonjour {{ user_display_name }},
+
+{{ summary_text }}
+
+{% for room in rooms %}
+{% include 'room.txt' with context %}
+{% endfor %}
+
+Vous pouvez désactiver ces notifications en cliquant ici {{ unsubscribe_link }}
+
diff --git a/res/templates-dinsic/room.html b/res/templates-dinsic/room.html
new file mode 100644
index 0000000000..0487b1b11c
--- /dev/null
+++ b/res/templates-dinsic/room.html
@@ -0,0 +1,33 @@
+<table class="room">
+ <tr class="room_header">
+ <td class="room_avatar">
+ {% if room.avatar_url %}
+ <img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
+ {% else %}
+ {% if room.hash % 3 == 0 %}
+ <img alt="" src="https://vector.im/beta/img/76cfa6.png" />
+ {% elif room.hash % 3 == 1 %}
+ <img alt="" src="https://vector.im/beta/img/50e2c2.png" />
+ {% else %}
+ <img alt="" src="https://vector.im/beta/img/f4c371.png" />
+ {% endif %}
+ {% endif %}
+ </td>
+ <td class="room_name" colspan="2">
+ {{ room.title }}
+ </td>
+ </tr>
+ {% if room.invite %}
+ <tr>
+ <td></td>
+ <td>
+ <a href="{{ room.link }}">Rejoindre la conversation.</a>
+ </td>
+ <td></td>
+ </tr>
+ {% else %}
+ {% for notif in room.notifs %}
+ {% include 'notif.html' with context %}
+ {% endfor %}
+ {% endif %}
+</table>
diff --git a/res/templates-dinsic/room.txt b/res/templates-dinsic/room.txt
new file mode 100644
index 0000000000..dd36d01d21
--- /dev/null
+++ b/res/templates-dinsic/room.txt
@@ -0,0 +1,9 @@
+{{ room.title }}
+
+{% if room.invite %}
+ Vous avez été invité, rejoignez la conversation en cliquant sur le lien suivant {{ room.link }}
+{% else %}
+ {% for notif in room.notifs %}
+ {% include 'notif.txt' with context %}
+ {% endfor %}
+{% endif %}
diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment
index 0ec5075e79..b8a85abe18 100755
--- a/scripts-dev/check-newsfragment
+++ b/scripts-dev/check-newsfragment
@@ -5,9 +5,9 @@
set -e
-# make sure that origin/develop is up to date
-git remote set-branches --add origin develop
-git fetch origin develop
+# make sure that origin/dinsic is up to date
+git remote set-branches --add origin dinsic
+git fetch origin dinsic
# if there are changes in the debian directory, check that the debian changelog
# has been updated
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 8b1277ad02..4e792fdb93 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -194,6 +194,7 @@ class Auth(object):
access_token = self.get_access_token_from_request(request)
user_id, app_service = yield self._get_appservice_user_id(request)
+
if user_id:
request.authenticated_entity = user_id
opentracing.set_tag("authenticated_entity", user_id)
@@ -258,11 +259,11 @@ class Auth(object):
except KeyError:
raise MissingClientTokenError()
- @defer.inlineCallbacks
def _get_appservice_user_id(self, request):
app_service = self.store.get_app_service_by_token(
self.get_access_token_from_request(request)
)
+
if app_service is None:
return None, None
@@ -280,8 +281,12 @@ class Auth(object):
if not app_service.is_interested_in_user(user_id):
raise AuthError(403, "Application service cannot masquerade as this user.")
- if not (yield self.store.get_user_by_id(user_id)):
- raise AuthError(403, "Application service has not registered this user")
+ # Let ASes manipulate nonexistent users (e.g. to shadow-register them)
+ # if not (yield self.store.get_user_by_id(user_id)):
+ # raise AuthError(
+ # 403,
+ # "Application service has not registered this user"
+ # )
return user_id, app_service
@defer.inlineCallbacks
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index cc8577552b..42eff8793b 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -85,6 +85,7 @@ class EventTypes(object):
RoomAvatar = "m.room.avatar"
RoomEncryption = "m.room.encryption"
GuestAccess = "m.room.guest_access"
+ Encryption = "m.room.encryption"
# These are used for validation
Message = "m.room.message"
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 0c20601600..de81cb9663 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -66,6 +67,13 @@ class Codes(object):
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
+ PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
+ PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
+ PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
+ PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
+ PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
+ PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
+ WEAK_PASSWORD = "M_WEAK_PASSWORD"
class CodeMessageException(RuntimeError):
@@ -438,6 +446,18 @@ class IncompatibleRoomVersionError(SynapseError):
return cs_error(self.msg, self.errcode, room_version=self._room_version)
+class PasswordRefusedError(SynapseError):
+ """A password has been refused, either during password reset/change or registration.
+ """
+
+ def __init__(
+ self,
+ msg="This password doesn't comply with the server's policy",
+ errcode=Codes.WEAK_PASSWORD,
+ ):
+ super(PasswordRefusedError, self).__init__(code=400, msg=msg, errcode=errcode)
+
+
class RequestSendFailed(RuntimeError):
"""Sending a HTTP request over federation failed due to not being able to
talk to the remote server for some reason.
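A minimal sketch of how the `Codes` and `PasswordRefusedError` defined above might be used to enforce the policy options from the sample config; the function is illustrative, not Synapse's actual handler:

```python
import re

from synapse.api.errors import Codes, PasswordRefusedError

def check_password_against_policy(password: str, policy: dict) -> None:
    """Raise PasswordRefusedError if `password` violates the configured policy."""
    if len(password) < policy.get("minimum_length", 0):
        raise PasswordRefusedError(errcode=Codes.PASSWORD_TOO_SHORT)
    if policy.get("require_digit") and not re.search(r"[0-9]", password):
        raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_DIGIT)
    if policy.get("require_symbol") and not re.search(r"[^a-zA-Z0-9]", password):
        raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_SYMBOL)
    if policy.get("require_lowercase") and not re.search(r"[a-z]", password):
        raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_LOWERCASE)
    if policy.get("require_uppercase") and not re.search(r"[A-Z]", password):
        raise PasswordRefusedError(errcode=Codes.PASSWORD_NO_UPPERCASE)
```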
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index ca96da6a4a..7fa91a3b11 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -57,6 +57,7 @@ from synapse.rest.client.v1.room import (
RoomStateRestServlet,
)
from synapse.rest.client.v1.voip import VoipRestServlet
+from synapse.rest.client.v2_alpha import groups
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
@@ -124,6 +125,8 @@ class ClientReaderServer(HomeServer):
PushRuleRestServlet(self).register(resource)
VersionsRestServlet(self).register(resource)
+ groups.register_servlets(self, resource)
+
resources.update({"/_matrix/client": resource})
root_resource = create_resource_tree(resources, NoResource())
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index 1f1cea1416..d055d11b23 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -33,8 +33,10 @@ from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
@@ -66,6 +68,8 @@ class FederationReaderSlavedStore(
SlavedEventStore,
SlavedKeyStore,
SlavedRegistrationStore,
+ SlavedGroupServerStore,
+ SlavedDeviceStore,
RoomStore,
DirectoryStore,
SlavedTransactionStore,
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index aea3985a5f..1b13e84425 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -270,7 +270,7 @@ class ApplicationService(object):
def is_exclusive_room(self, room_id):
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
- def get_exlusive_user_regexes(self):
+ def get_exclusive_user_regexes(self):
"""Get the list of regexes used to determine if a user is exclusively
registered by the AS
"""
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 08619404bb..e865fd8f41 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -18,6 +18,7 @@
import argparse
import errno
import os
from collections import OrderedDict
+from io import open as io_open
from textwrap import dedent
from typing import Any, MutableMapping, Optional
@@ -169,7 +170,7 @@ class Config(object):
@classmethod
def read_file(cls, file_path, config_name):
cls.check_file(file_path, config_name)
- with open(file_path) as file_stream:
+ with io_open(file_path, encoding="utf-8") as file_stream:
return file_stream.read()
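For context, `io_open` is used here for Python 2 compatibility: the builtin `open()` on Python 2 returns byte strings, while `io.open()` behaves like Python 3's `open()` and accepts an `encoding` argument. A small sketch of the same pattern:

```python
from io import open as io_open

# Reads the file as text, decoding it as UTF-8 on both Python 2 and 3.
with io_open("homeserver.yaml", encoding="utf-8") as f:
    contents = f.read()
```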
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 2a634ac751..2c13810ab8 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2015-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,6 +33,10 @@ class PasswordConfig(Config):
self.password_localdb_enabled = password_config.get("localdb_enabled", True)
self.password_pepper = password_config.get("pepper", "")
+ # Password policy
+ self.password_policy = password_config.get("policy", {})
+ self.password_policy_enabled = self.password_policy.pop("enabled", False)
+
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """\
password_config:
@@ -48,4 +54,34 @@ class PasswordConfig(Config):
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#
#pepper: "EVEN_MORE_SECRET"
+
+ # Define and enforce a password policy. Each parameter is optional, boolean
+ # parameters default to 'false' and integer parameters default to 0.
+ # This is an early implementation of MSC2000.
+ #
+ #policy:
+ # Whether to enforce the password policy.
+ #
+ #enabled: true
+
+ # Minimum accepted length for a password.
+ #
+ #minimum_length: 15
+
+ # Whether a password must contain at least one digit.
+ #
+ #require_digit: true
+
+ # Whether a password must contain at least one symbol.
+ # A symbol is any character that's not a number or a letter.
+ #
+ #require_symbol: true
+
+ # Whether a password must contain at least one lowercase letter.
+ #
+ #require_lowercase: true
+
+ # Whether a password must contain at least one uppercase letter.
+ #
+ #require_uppercase: true
"""
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py
index 4a3bfc4354..dbc3dd7a2c 100644
--- a/synapse/config/ratelimiting.py
+++ b/synapse/config/ratelimiting.py
@@ -70,6 +70,9 @@ class RatelimitConfig(Config):
)
self.rc_registration = RateLimitConfig(config.get("rc_registration", {}))
+ self.rc_third_party_invite = RateLimitConfig(
+ config.get("rc_third_party_invite", {})
+ )
rc_login_config = config.get("rc_login", {})
self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {}))
@@ -109,6 +112,8 @@ class RatelimitConfig(Config):
# - one for login that ratelimits login requests based on the account the
# client is attempting to log into, based on the amount of failed login
# attempts for this account.
+ # - one that ratelimits third-party invite requests based on the account
+ # that's making the requests.
# - one for ratelimiting redactions by room admins. If this is not explicitly
# set then it uses the same ratelimiting as per rc_message. This is useful
# to allow room admins to deal with abuse quickly.
@@ -134,6 +139,10 @@ class RatelimitConfig(Config):
# per_second: 0.17
# burst_count: 3
#
+ #rc_third_party_invite:
+ # per_second: 0.2
+ # burst_count: 10
+ #
#rc_admin_redaction:
# per_second: 1
# burst_count: 50
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 9bb3beedbc..7dba213d74 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -99,8 +99,19 @@ class RegistrationConfig(Config):
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
+ self.check_is_for_allowed_local_3pids = config.get(
+ "check_is_for_allowed_local_3pids", None
+ )
+ self.allow_invited_3pids = config.get("allow_invited_3pids", False)
+
+ self.disable_3pid_changes = config.get("disable_3pid_changes", False)
+
self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
self.registration_shared_secret = config.get("registration_shared_secret")
+ self.register_mxid_from_3pid = config.get("register_mxid_from_3pid")
+ self.register_just_use_email_for_display_name = config.get(
+ "register_just_use_email_for_display_name", False
+ )
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config.get(
@@ -129,6 +140,18 @@ class RegistrationConfig(Config):
raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
+ self.disable_set_displayname = config.get("disable_set_displayname", False)
+ self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)
+
+ self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", [])
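+ # A single server name may be given as a bare string; normalise it to a list.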
+ if not isinstance(self.replicate_user_profiles_to, list):
+ self.replicate_user_profiles_to = [self.replicate_user_profiles_to]
+
+ self.shadow_server = config.get("shadow_server", None)
+ self.rewrite_identity_server_urls = config.get(
+ "rewrite_identity_server_urls", {}
+ )
+
self.disable_msisdn_registration = config.get(
"disable_msisdn_registration", False
)
@@ -244,9 +267,32 @@ class RegistrationConfig(Config):
#
#disable_msisdn_registration: true
+ # Derive the user's matrix ID from a type of 3PID used when registering.
+ # This overrides any matrix ID the user proposes when calling /register.
+ # The 3PID type should be present in registrations_require_3pid to avoid
+ # users failing to register if they don't specify the right kind of 3pid.
+ #
+ #register_mxid_from_3pid: email
+
+ # Uncomment to set the display name of new users to their email address,
+ # rather than using the default heuristic.
+ #
+ #register_just_use_email_for_display_name: true
+
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
+ # If set, use the given Identity Server to establish which 3PIDs are allowed
+ # to register. Overrides allowed_local_3pids below.
+ #
+ #check_is_for_allowed_local_3pids: matrix.org
+ #
+ # If you are using an Identity Server, you can also check whether it has a
+ # pending invite for the given 3PID, and if so allow the user to sign up on
+ # the platform:
+ #
+ #allow_invited_3pids: false
+ #
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\\.org'
@@ -255,6 +301,11 @@ class RegistrationConfig(Config):
# - medium: msisdn
# pattern: '\\+44'
+ # If true, prevent users from changing the 3PIDs associated with
+ # their accounts.
+ #
+ #disable_3pid_changes: false
+
# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -304,6 +355,30 @@ class RegistrationConfig(Config):
# - matrix.org
# - vector.im
+ # If set, user IDs, display names and avatar URLs will be replicated
+ # to the given server (or list of servers) whenever they change.
+ # This is an experimental API currently implemented by sydent to support
+ # cross-homeserver user directories.
+ #
+ #replicate_user_profiles_to: example.com
+
+ # If specified, attempt to replay registrations, profile changes and 3pid
+ # bindings on the given target homeserver via the AS API. The target
+ # homeserver is authenticated via the given AS token.
+ #
+ #shadow_server:
+ # hs_url: https://shadow.example.com
+ # hs: shadow.example.com
+ # as_token: 12u394refgbdhivsia
+
+ # If enabled, don't let users set their own display names/avatars
+ # other than for the very first time (unless they are a server admin).
+ # Useful when provisioning users based on the contents of a 3rd party
+ # directory and to avoid ambiguities.
+ #
+ #disable_set_displayname: false
+ #disable_set_avatar_url: false
+
# Handle threepid (email/phone etc) registration and password resets through a set of
# *trusted* identity servers. Note that this allows the configured identity server to
# reset passwords for accounts!
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 7d2dd27fd0..5ebc2ea1f1 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -97,6 +97,12 @@ class ContentRepositoryConfig(Config):
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
+ self.max_avatar_size = config.get("max_avatar_size")
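+ # If set, parse_size converts human-friendly values such as "10M" to bytes.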
+ if self.max_avatar_size:
+ self.max_avatar_size = self.parse_size(self.max_avatar_size)
+
+ self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes", [])
+
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
@@ -234,6 +240,30 @@ class ContentRepositoryConfig(Config):
#
#max_upload_size: 10M
+ # The largest allowed size for a user avatar. If not defined, no
+ # restriction will be imposed.
+ #
+ # Note that this only applies when an avatar is changed globally.
+ # Per-room avatar changes are not affected. See allow_per_room_profiles
+ # for disabling that functionality.
+ #
+ # Note that user avatar changes will not work if this is set without
+ # using Synapse's local media repo.
+ #
+ #max_avatar_size: 10M
+
+ # The MIME types allowed for a user avatar. If not defined, no restriction
+ # will be imposed.
+ #
+ # Note that this only applies when an avatar is changed globally.
+ # Per-room avatar changes are not affected. See allow_per_room_profiles
+ # for disabling that functionality.
+ #
+ # Note that user avatar changes will not work if this is set without
+ # using Synapse's local media repo.
+ #
+ #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
+
# Maximum number of pixels that will be thumbnailed
#
#max_image_pixels: 32M
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 0ec1b0fadd..0c5b16a220 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -253,6 +253,12 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
+ # Whether to show the users on this homeserver in the user directory. Defaults to
+ # True.
+ self.show_users_in_user_directory = config.get(
+ "show_users_in_user_directory", True
+ )
+
retention_config = config.get("retention")
if retention_config is None:
retention_config = {}
@@ -892,6 +898,74 @@ class ServerConfig(Config):
#
#allow_per_room_profiles: false
+ # Whether to show the users on this homeserver in the user directory. Defaults to
+ # 'true'.
+ #
+ #show_users_in_user_directory: false
+
+ # Message retention policy at the server level.
+ #
+ # Room admins and mods can define a retention period for their rooms using the
+ # 'm.room.retention' state event, and server admins can cap this period by setting
+ # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+ #
+ # If this feature is enabled, Synapse will regularly look for and purge events
+ # which are older than the room's maximum retention period. Synapse will also
+ # filter events received over federation so that events that should have been
+ # purged are ignored and not stored again.
+ #
+ retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+ # Retention policy limits. If set, a user won't be able to send an
+ # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
+ # that's not within this range. This is especially useful in closed federations,
+ # in which server admins can make sure every federating server applies the same
+ # rules.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+ # Server admins can define the settings of the background jobs that purge
+ # events whose lifetime has expired, under the 'purge_jobs' section.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+ # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a very frequent basis (e.g. every 5min), without that
+ # purge being performed by a job that iterates over every room it knows of,
+ # which would be quite heavy on the server.
+ #
+ #purge_jobs:
+ # - shortest_max_lifetime: 1d
+ # longest_max_lifetime: 3d
+ # interval: 5m
+ # - shortest_max_lifetime: 3d
+ # longest_max_lifetime: 1y
+ # interval: 24h
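+ #
+ # (With the example jobs above, a room whose 'max_lifetime' is two days would
+ # be purged every 5 minutes by the first job, while a room whose
+ # 'max_lifetime' is one year would be purged once a day by the second.)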
+
# How long to keep redacted events in unredacted form in the database. After
# this period redacted events get replaced with their redacted form in the DB.
#
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 2e9e478a2a..2514b0713d 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -109,6 +109,8 @@ class TlsConfig(Config):
fed_whitelist_entries = config.get(
"federation_certificate_verification_whitelist", []
)
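+ # A blank "federation_certificate_verification_whitelist:" entry in the
+ # config file parses as None rather than as an empty list.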
+ if fed_whitelist_entries is None:
+ fed_whitelist_entries = []
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = [] # type: List[str]
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index c8d19c5d6b..43b6c40456 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -26,6 +26,7 @@ class UserDirectoryConfig(Config):
def read_config(self, config, **kwargs):
self.user_directory_search_enabled = True
self.user_directory_search_all_users = False
+ self.user_directory_defer_to_id_server = None
user_directory_config = config.get("user_directory", None)
if user_directory_config:
self.user_directory_search_enabled = user_directory_config.get(
@@ -34,6 +35,9 @@ class UserDirectoryConfig(Config):
self.user_directory_search_all_users = user_directory_config.get(
"search_all_users", False
)
+ self.user_directory_defer_to_id_server = user_directory_config.get(
+ "defer_to_id_server", None
+ )
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
@@ -52,4 +56,9 @@ class UserDirectoryConfig(Config):
#user_directory:
# enabled: true
# search_all_users: false
+ #
+ # # If this is set, user search will be delegated to this ID server instead
+ # # of Synapse performing the search itself.
+ # # This is an experimental API.
+ # defer_to_id_server: https://id.example.com
"""
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index f813fa2fe7..a842661a90 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -16,13 +16,13 @@
import os
from distutils.util import strtobool
+from typing import Optional, Type
import six
from unpaddedbase64 import encode_base64
-from synapse.api.errors import UnsupportedRoomVersionError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.types import JsonDict
from synapse.util.caches import intern_dict
from synapse.util.frozenutils import freeze
@@ -189,9 +189,15 @@ class EventBase(object):
redacts = _event_dict_property("redacts", None)
room_id = _event_dict_property("room_id")
sender = _event_dict_property("sender")
+ state_key = _event_dict_property("state_key")
+ type = _event_dict_property("type")
user_id = _event_dict_property("sender")
@property
+ def event_id(self) -> str:
+ raise NotImplementedError()
+
+ @property
def membership(self):
return self.content["membership"]
@@ -281,10 +287,7 @@ class FrozenEvent(EventBase):
else:
frozen_dict = event_dict
- self.event_id = event_dict["event_id"]
- self.type = event_dict["type"]
- if "state_key" in event_dict:
- self.state_key = event_dict["state_key"]
+ self._event_id = event_dict["event_id"]
super(FrozenEvent, self).__init__(
frozen_dict,
@@ -294,6 +297,10 @@ class FrozenEvent(EventBase):
rejected_reason=rejected_reason,
)
+ @property
+ def event_id(self) -> str:
+ return self._event_id
+
def __str__(self):
return self.__repr__()
@@ -332,9 +339,6 @@ class FrozenEventV2(EventBase):
frozen_dict = event_dict
self._event_id = None
- self.type = event_dict["type"]
- if "state_key" in event_dict:
- self.state_key = event_dict["state_key"]
super(FrozenEventV2, self).__init__(
frozen_dict,
@@ -404,28 +408,7 @@ class FrozenEventV3(FrozenEventV2):
return self._event_id
-def room_version_to_event_format(room_version):
- """Converts a room version string to the event format
-
- Args:
- room_version (str)
-
- Returns:
- int
-
- Raises:
- UnsupportedRoomVersionError if the room version is unknown
- """
- v = KNOWN_ROOM_VERSIONS.get(room_version)
-
- if not v:
- # this can happen if support is withdrawn for a room version
- raise UnsupportedRoomVersionError()
-
- return v.event_format
-
-
-def event_type_from_format_version(format_version):
+def event_type_from_format_version(format_version: int) -> Type[EventBase]:
"""Returns the python type to use to construct an Event object for the
given event format version.
@@ -445,3 +428,14 @@ def event_type_from_format_version(format_version):
return FrozenEventV3
else:
raise Exception("No event format %r" % (format_version,))
+
+
+def make_event_from_dict(
+ event_dict: JsonDict,
+ room_version: RoomVersion = RoomVersions.V1,
+ internal_metadata_dict: JsonDict = {},
+ rejected_reason: Optional[str] = None,
+) -> EventBase:
+ """Construct an EventBase from the given event dict"""
+ event_type = event_type_from_format_version(room_version.event_format)
+ return event_type(event_dict, internal_metadata_dict, rejected_reason)
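+
+
+ # Illustrative usage of the helper above, assuming `event_dict` is the JSON
+ # body of an event in a version-5 room:
+ #
+ #   event = make_event_from_dict(event_dict, RoomVersions.V5)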
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index 8d63ad6dc3..a0c4a40c27 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -28,11 +28,7 @@ from synapse.api.room_versions import (
RoomVersion,
)
from synapse.crypto.event_signing import add_hashes_and_signatures
-from synapse.events import (
- EventBase,
- _EventInternalMetadata,
- event_type_from_format_version,
-)
+from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
from synapse.types import EventID, JsonDict
from synapse.util import Clock
from synapse.util.stringutils import random_string
@@ -256,8 +252,8 @@ def create_local_event_from_event_dict(
event_dict.setdefault("signatures", {})
add_hashes_and_signatures(room_version, event_dict, hostname, signing_key)
- return event_type_from_format_version(format_version)(
- event_dict, internal_metadata_dict=internal_metadata_dict
+ return make_event_from_dict(
+ event_dict, room_version, internal_metadata_dict=internal_metadata_dict
)
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 5a907718d6..b7a31c9a7d 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -58,13 +58,33 @@ class SpamChecker(object):
return self.spam_checker.check_event_for_spam(event)
- def user_may_invite(self, inviter_userid, invitee_userid, room_id):
+ def user_may_invite(
+ self,
+ inviter_userid,
+ invitee_userid,
+ third_party_invite,
+ room_id,
+ new_room,
+ published_room,
+ ):
"""Checks if a given user may send an invite
If this method returns false, the invite will be rejected.
Args:
- userid (string): The sender's user ID
+ inviter_userid (str)
+ invitee_userid (str|None): The user ID of the invitee. Is None
+ if this is a third party invite and the 3PID is not bound to a
+ user ID.
+ third_party_invite (dict|None): If a third party invite then is a
+ dict containing the medium and address of the invitee.
+ room_id (str)
+ new_room (bool): Whether the user is being invited to the room as
+ part of a room creation, if so the invitee would have been
+ included in the call to `user_may_create_room`.
+ published_room (bool): Whether the room the user is being invited
+ to has been published in the local homeserver's public room
+ directory.
Returns:
bool: True if the user may send an invite, otherwise False
@@ -73,16 +93,29 @@ class SpamChecker(object):
return True
return self.spam_checker.user_may_invite(
- inviter_userid, invitee_userid, room_id
+ inviter_userid,
+ invitee_userid,
+ third_party_invite,
+ room_id,
+ new_room,
+ published_room,
)
- def user_may_create_room(self, userid):
+ def user_may_create_room(
+ self, userid, invite_list, third_party_invite_list, cloning
+ ):
"""Checks if a given user may create a room
If this method returns false, the creation request will be rejected.
Args:
userid (string): The sender's user ID
+ invite_list (list[str]): List of user IDs that would be invited to
+ the new room.
+ third_party_invite_list (list[dict]): List of third party invites
+ for the new room.
+ cloning (bool): Whether the user is cloning an existing room, e.g.
+ upgrading a room.
Returns:
bool: True if the user may create a room, otherwise False
@@ -90,7 +123,9 @@ class SpamChecker(object):
if self.spam_checker is None:
return True
- return self.spam_checker.user_may_create_room(userid)
+ return self.spam_checker.user_may_create_room(
+ userid, invite_list, third_party_invite_list, cloning
+ )
def user_may_create_room_alias(self, userid, room_alias):
"""Checks if a given user may create a room alias
@@ -125,3 +160,21 @@ class SpamChecker(object):
return True
return self.spam_checker.user_may_publish_room(userid, room_id)
+
+ def user_may_join_room(self, userid, room_id, is_invited):
+ """Checks if a given users is allowed to join a room.
+
+ Not called when the user creates the room.
+
+ Args:
+ userid (str)
+ room_id (str)
+ is_invited (bool): Whether the user is invited into the room
+
+ Returns:
+ bool: Whether the user may join the room
+ """
+ if self.spam_checker is None:
+ return True
+
+ return self.spam_checker.user_may_join_room(userid, room_id, is_invited)
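+
+
+ # An illustrative (hypothetical) spam checker module implementing the new
+ # callbacks might look like this (remaining callbacks omitted):
+ #
+ #   class ExampleSpamChecker:
+ #       def user_may_join_room(self, userid, room_id, is_invited):
+ #           # Only allow users to join rooms they have been invited to.
+ #           return is_invited
+ #
+ #       def user_may_invite(self, inviter_userid, invitee_userid,
+ #                           third_party_invite, room_id, new_room,
+ #                           published_room):
+ #           # Only allow invites to rooms published in the room directory.
+ #           return published_room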
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 86f7e5f8aa..459132d388 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -74,15 +74,16 @@ class ThirdPartyEventRules(object):
is_requester_admin (bool): If the requester is an admin
Returns:
- defer.Deferred
+ defer.Deferred[bool]: Whether room creation is allowed or denied.
"""
if self.third_party_rules is None:
- return
+ return True
- yield self.third_party_rules.on_create_room(
+ ret = yield self.third_party_rules.on_create_room(
requester, config, is_requester_admin
)
+ return ret
@defer.inlineCallbacks
def check_threepid_can_be_invited(self, medium, address, room_id):
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 0e22183280..eea64c1c9f 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,9 +23,13 @@ from twisted.internet.defer import DeferredList
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions
+from synapse.api.room_versions import (
+ KNOWN_ROOM_VERSIONS,
+ EventFormatVersions,
+ RoomVersion,
+)
from synapse.crypto.event_signing import check_event_content_hash
-from synapse.events import event_type_from_format_version
+from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
@@ -33,7 +38,7 @@ from synapse.logging.context import (
make_deferred_yieldable,
preserve_fn,
)
-from synapse.types import get_domain_from_id
+from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError
logger = logging.getLogger(__name__)
@@ -342,16 +347,15 @@ def _is_invite_via_3pid(event):
)
-def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
- """Construct a FrozenEvent from an event json received over federation
+def event_from_pdu_json(
+ pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False
+) -> EventBase:
+ """Construct an EventBase from an event json received over federation
Args:
- pdu_json (object): pdu as received over federation
- event_format_version (int): The event format version
- outlier (bool): True to mark this event as an outlier
-
- Returns:
- FrozenEvent
+ pdu_json: pdu as received over federation
+ room_version: The version of the room this event belongs to
+ outlier: True to mark this event as an outlier
Raises:
SynapseError: if the pdu is missing required fields or is otherwise
@@ -370,8 +374,7 @@ def event_from_pdu_json(pdu_json, event_format_version, outlier=False):
elif depth > MAX_DEPTH:
raise SynapseError(400, "Depth too large", Codes.BAD_JSON)
- event = event_type_from_format_version(event_format_version)(pdu_json)
-
+ event = make_event_from_dict(pdu_json, room_version)
event.internal_metadata.outlier = outlier
return event
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index f99d17a7de..4870e39652 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -17,7 +17,18 @@
import copy
import itertools
import logging
-from typing import Dict, Iterable
+from typing import (
+ Any,
+ Awaitable,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ TypeVar,
+)
from prometheus_client import Counter
@@ -35,12 +46,14 @@ from synapse.api.errors import (
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
+ RoomVersion,
RoomVersions,
)
-from synapse.events import builder, room_version_to_event_format
+from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.utils import log_function
+from synapse.types import JsonDict
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
@@ -52,6 +65,8 @@ sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["t
PDU_RETRY_TIME_MS = 1 * 60 * 1000
+T = TypeVar("T")
+
class InvalidResponseError(RuntimeError):
"""Helper for _try_destination_list: indicates that the server returned a response
@@ -170,21 +185,17 @@ class FederationClient(FederationBase):
sent_queries_counter.labels("client_one_time_keys").inc()
return self.transport_layer.claim_client_keys(destination, content, timeout)
- @defer.inlineCallbacks
- @log_function
- def backfill(self, dest, room_id, limit, extremities):
- """Requests some more historic PDUs for the given context from the
+ async def backfill(
+ self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
+ ) -> List[EventBase]:
+ """Requests some more historic PDUs for the given room from the
given destination server.
Args:
dest (str): The remote homeserver to ask.
room_id (str): The room_id to backfill.
- limit (int): The maximum number of PDUs to return.
- extremities (list): List of PDU id and origins of the first pdus
- we have seen from the context
-
- Returns:
- Deferred: Results in the received PDUs.
+ limit (int): The maximum number of events to return.
+ extremities (list): our current backwards extremities, to backfill from
"""
logger.debug("backfill extrem=%s", extremities)
@@ -192,34 +203,37 @@ class FederationClient(FederationBase):
if not extremities:
return
- transaction_data = yield self.transport_layer.backfill(
+ transaction_data = await self.transport_layer.backfill(
dest, room_id, extremities, limit
)
logger.debug("backfill transaction_data=%r", transaction_data)
- room_version = yield self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
+ room_version = await self.store.get_room_version(room_id)
pdus = [
- event_from_pdu_json(p, format_ver, outlier=False)
+ event_from_pdu_json(p, room_version, outlier=False)
for p in transaction_data["pdus"]
]
# FIXME: We should handle signature failures more gracefully.
- pdus[:] = yield make_deferred_yieldable(
+ pdus[:] = await make_deferred_yieldable(
defer.gatherResults(
- self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
+ self._check_sigs_and_hashes(room_version.identifier, pdus),
+ consumeErrors=True,
).addErrback(unwrapFirstError)
)
return pdus
- @defer.inlineCallbacks
- @log_function
- def get_pdu(
- self, destinations, event_id, room_version, outlier=False, timeout=None
- ):
+ async def get_pdu(
+ self,
+ destinations: Iterable[str],
+ event_id: str,
+ room_version: RoomVersion,
+ outlier: bool = False,
+ timeout: Optional[int] = None,
+ ) -> Optional[EventBase]:
"""Requests the PDU with given origin and ID from the remote home
servers.
@@ -227,18 +241,17 @@ class FederationClient(FederationBase):
one succeeds.
Args:
- destinations (list): Which homeservers to query
- event_id (str): event to fetch
- room_version (str): version of the room
- outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
+ destinations: Which homeservers to query
+ event_id: event to fetch
+ room_version: version of the room
+ outlier: Indicates whether the PDU is an `outlier`, i.e. if
it's from an arbitary point in the context as opposed to part
of the current block of PDUs. Defaults to `False`
- timeout (int): How long to try (in ms) each destination for before
+ timeout: How long to try (in ms) each destination for before
moving to the next destination. None indicates no timeout.
Returns:
- Deferred: Results in the requested PDU, or None if we were unable to find
- it.
+ The requested PDU, or None if we were unable to find it.
"""
# TODO: Rate limit the number of times we try and get the same event.
@@ -249,8 +262,6 @@ class FederationClient(FederationBase):
pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
- format_ver = room_version_to_event_format(room_version)
-
signed_pdu = None
for destination in destinations:
now = self._clock.time_msec()
@@ -259,7 +270,7 @@ class FederationClient(FederationBase):
continue
try:
- transaction_data = yield self.transport_layer.get_event(
+ transaction_data = await self.transport_layer.get_event(
destination, event_id, timeout=timeout
)
@@ -271,7 +282,7 @@ class FederationClient(FederationBase):
)
pdu_list = [
- event_from_pdu_json(p, format_ver, outlier=outlier)
+ event_from_pdu_json(p, room_version, outlier=outlier)
for p in transaction_data["pdus"]
]
@@ -279,7 +290,9 @@ class FederationClient(FederationBase):
pdu = pdu_list[0]
# Check signatures are correct.
- signed_pdu = yield self._check_sigs_and_hash(room_version, pdu)
+ signed_pdu = await self._check_sigs_and_hash(
+ room_version.identifier, pdu
+ )
break
@@ -309,15 +322,16 @@ class FederationClient(FederationBase):
return signed_pdu
- @defer.inlineCallbacks
- def get_room_state_ids(self, destination: str, room_id: str, event_id: str):
+ async def get_room_state_ids(
+ self, destination: str, room_id: str, event_id: str
+ ) -> Tuple[List[str], List[str]]:
"""Calls the /state_ids endpoint to fetch the state at a particular point
in the room, and the auth events for the given event
Returns:
- Tuple[List[str], List[str]]: a tuple of (state event_ids, auth event_ids)
+ a tuple of (state event_ids, auth event_ids)
"""
- result = yield self.transport_layer.get_room_state_ids(
+ result = await self.transport_layer.get_room_state_ids(
destination, room_id, event_id=event_id
)
@@ -331,37 +345,39 @@ class FederationClient(FederationBase):
return state_event_ids, auth_event_ids
- @defer.inlineCallbacks
- @log_function
- def get_event_auth(self, destination, room_id, event_id):
- res = yield self.transport_layer.get_event_auth(destination, room_id, event_id)
+ async def get_event_auth(self, destination, room_id, event_id):
+ res = await self.transport_layer.get_event_auth(destination, room_id, event_id)
- room_version = yield self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
+ room_version = await self.store.get_room_version(room_id)
auth_chain = [
- event_from_pdu_json(p, format_ver, outlier=True) for p in res["auth_chain"]
+ event_from_pdu_json(p, room_version, outlier=True)
+ for p in res["auth_chain"]
]
- signed_auth = yield self._check_sigs_and_hash_and_fetch(
- destination, auth_chain, outlier=True, room_version=room_version
+ signed_auth = await self._check_sigs_and_hash_and_fetch(
+ destination, auth_chain, outlier=True, room_version=room_version.identifier
)
signed_auth.sort(key=lambda e: e.depth)
return signed_auth
- @defer.inlineCallbacks
- def _try_destination_list(self, description, destinations, callback):
+ async def _try_destination_list(
+ self,
+ description: str,
+ destinations: Iterable[str],
+ callback: Callable[[str], Awaitable[T]],
+ ) -> T:
"""Try an operation on a series of servers, until it succeeds
Args:
- description (unicode): description of the operation we're doing, for logging
+ description: description of the operation we're doing, for logging
- destinations (Iterable[unicode]): list of server_names to try
+ destinations: list of server_names to try
- callback (callable): Function to run for each server. Passed a single
- argument: the server_name to try. May return a deferred.
+ callback: Function to run for each server. Passed a single
+ argument: the server_name to try.
If the callback raises a CodeMessageException with a 300/400 code,
attempts to perform the operation stop immediately and the exception is
@@ -372,7 +388,7 @@ class FederationClient(FederationBase):
suppressed if the exception is an InvalidResponseError.
Returns:
- The [Deferred] result of callback, if it succeeds
+ The result of callback, if it succeeds
Raises:
SynapseError if the chosen remote server returns a 300/400 code, or
@@ -383,7 +399,7 @@ class FederationClient(FederationBase):
continue
try:
- res = yield callback(destination)
+ res = await callback(destination)
return res
except InvalidResponseError as e:
logger.warning("Failed to %s via %s: %s", description, destination, e)
@@ -402,12 +418,12 @@ class FederationClient(FederationBase):
)
except Exception:
logger.warning(
- "Failed to %s via %s", description, destination, exc_info=1
+ "Failed to %s via %s", description, destination, exc_info=True
)
raise SynapseError(502, "Failed to %s via any server" % (description,))
- def make_membership_event(
+ async def make_membership_event(
self,
destinations: Iterable[str],
room_id: str,
@@ -415,7 +431,7 @@ class FederationClient(FederationBase):
membership: str,
content: dict,
params: Dict[str, str],
- ):
+ ) -> Tuple[str, EventBase, RoomVersion]:
"""
Creates an m.room.member event, with context, without participating in the room.
@@ -436,19 +452,19 @@ class FederationClient(FederationBase):
content: Any additional data to put into the content field of the
event.
params: Query parameters to include in the request.
- Return:
- Deferred[Tuple[str, FrozenEvent, RoomVersion]]: resolves to a tuple of
+
+ Returns:
`(origin, event, room_version)` where origin is the remote
homeserver which generated the event, and room_version is the
version of the room.
- Fails with a `UnsupportedRoomVersionError` if remote responds with
- a room version we don't understand.
+ Raises:
+ UnsupportedRoomVersionError: if remote responds with
+ a room version we don't understand.
- Fails with a ``SynapseError`` if the chosen remote server
- returns a 300/400 code.
+ SynapseError: if the chosen remote server returns a 300/400 code.
- Fails with a ``RuntimeError`` if no servers were reachable.
+ RuntimeError: if no servers were reachable.
"""
valid_memberships = {Membership.JOIN, Membership.LEAVE}
if membership not in valid_memberships:
@@ -457,9 +473,8 @@ class FederationClient(FederationBase):
% (membership, ",".join(valid_memberships))
)
- @defer.inlineCallbacks
- def send_request(destination):
- ret = yield self.transport_layer.make_membership_event(
+ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]:
+ ret = await self.transport_layer.make_membership_event(
destination, room_id, user_id, membership, params
)
@@ -492,88 +507,83 @@ class FederationClient(FederationBase):
event_dict=pdu_dict,
)
- return (destination, ev, room_version)
+ return destination, ev, room_version
- return self._try_destination_list(
+ return await self._try_destination_list(
"make_" + membership, destinations, send_request
)
- def send_join(self, destinations, pdu, event_format_version):
+ async def send_join(
+ self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion
+ ) -> Dict[str, Any]:
"""Sends a join event to one of a list of homeservers.
Doing so will cause the remote server to add the event to the graph,
and send the event out to the rest of the federation.
Args:
- destinations (str): Candidate homeservers which are probably
+ destinations: Candidate homeservers which are probably
participating in the room.
- pdu (BaseEvent): event to be sent
- event_format_version (int): The event format version
+ pdu: event to be sent
+ room_version: the version of the room (according to the server that
+ did the make_join)
- Return:
- Deferred: resolves to a dict with members ``origin`` (a string
- giving the serer the event was sent to, ``state`` (?) and
+ Returns:
+ a dict with members ``origin`` (a string
+ giving the server the event was sent to), ``state`` (?) and
``auth_chain``.
- Fails with a ``SynapseError`` if the chosen remote server
- returns a 300/400 code.
+ Raises:
+ SynapseError: if the chosen remote server returns a 300/400 code.
- Fails with a ``RuntimeError`` if no servers were reachable.
+ RuntimeError: if no servers were reachable.
"""
- def check_authchain_validity(signed_auth_chain):
- for e in signed_auth_chain:
- if e.type == EventTypes.Create:
- create_event = e
- break
- else:
- raise InvalidResponseError("no %s in auth chain" % (EventTypes.Create,))
-
- # the room version should be sane.
- room_version = create_event.content.get("room_version", "1")
- if room_version not in KNOWN_ROOM_VERSIONS:
- # This shouldn't be possible, because the remote server should have
- # rejected the join attempt during make_join.
- raise InvalidResponseError(
- "room appears to have unsupported version %s" % (room_version,)
- )
-
- @defer.inlineCallbacks
- def send_request(destination):
- content = yield self._do_send_join(destination, pdu)
+ async def send_request(destination) -> Dict[str, Any]:
+ content = await self._do_send_join(destination, pdu)
logger.debug("Got content: %s", content)
state = [
- event_from_pdu_json(p, event_format_version, outlier=True)
+ event_from_pdu_json(p, room_version, outlier=True)
for p in content.get("state", [])
]
auth_chain = [
- event_from_pdu_json(p, event_format_version, outlier=True)
+ event_from_pdu_json(p, room_version, outlier=True)
for p in content.get("auth_chain", [])
]
pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
- room_version = None
+ create_event = None
for e in state:
if (e.type, e.state_key) == (EventTypes.Create, ""):
- room_version = e.content.get(
- "room_version", RoomVersions.V1.identifier
- )
+ create_event = e
break
- if room_version is None:
+ if create_event is None:
# If the state doesn't have a create event then the room is
# invalid, and it would fail auth checks anyway.
raise SynapseError(400, "No create event in state")
- valid_pdus = yield self._check_sigs_and_hash_and_fetch(
+ # the room version should be sane.
+ create_room_version = create_event.content.get(
+ "room_version", RoomVersions.V1.identifier
+ )
+ if create_room_version != room_version.identifier:
+ # either the server that fulfilled the make_join, or the server that is
+ # handling the send_join, is lying.
+ raise InvalidResponseError(
+ "Unexpected room version %s in create event"
+ % (create_room_version,)
+ )
+
+ valid_pdus = await self._check_sigs_and_hash_and_fetch(
destination,
list(pdus.values()),
outlier=True,
- room_version=room_version,
+ room_version=room_version.identifier,
)
valid_pdus_map = {p.event_id: p for p in valid_pdus}
@@ -597,7 +607,17 @@ class FederationClient(FederationBase):
for s in signed_state:
s.internal_metadata = copy.deepcopy(s.internal_metadata)
- check_authchain_validity(signed_auth)
+ # double-check that the same create event has ended up in the auth chain
+ auth_chain_create_events = [
+ e.event_id
+ for e in signed_auth
+ if (e.type, e.state_key) == (EventTypes.Create, "")
+ ]
+ if auth_chain_create_events != [create_event.event_id]:
+ raise InvalidResponseError(
+ "Unexpected create event(s) in auth chain"
+ % (auth_chain_create_events,)
+ )
return {
"state": signed_state,
@@ -605,14 +625,13 @@ class FederationClient(FederationBase):
"origin": destination,
}
- return self._try_destination_list("send_join", destinations, send_request)
+ return await self._try_destination_list("send_join", destinations, send_request)
- @defer.inlineCallbacks
- def _do_send_join(self, destination, pdu):
+ async def _do_send_join(self, destination: str, pdu: EventBase):
time_now = self._clock.time_msec()
try:
- content = yield self.transport_layer.send_join_v2(
+ content = await self.transport_layer.send_join_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -634,7 +653,7 @@ class FederationClient(FederationBase):
logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")
- resp = yield self.transport_layer.send_join_v1(
+ resp = await self.transport_layer.send_join_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -645,51 +664,45 @@ class FederationClient(FederationBase):
# content.
return resp[1]
- @defer.inlineCallbacks
- def send_invite(self, destination, room_id, event_id, pdu):
- room_version = yield self.store.get_room_version_id(room_id)
+ async def send_invite(
+ self, destination: str, room_id: str, event_id: str, pdu: EventBase,
+ ) -> EventBase:
+ room_version = await self.store.get_room_version(room_id)
- content = yield self._do_send_invite(destination, pdu, room_version)
+ content = await self._do_send_invite(destination, pdu, room_version)
pdu_dict = content["event"]
logger.debug("Got response to send_invite: %s", pdu_dict)
- room_version = yield self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
-
- pdu = event_from_pdu_json(pdu_dict, format_ver)
+ pdu = event_from_pdu_json(pdu_dict, room_version)
# Check signatures are correct.
- pdu = yield self._check_sigs_and_hash(room_version, pdu)
+ pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
# FIXME: We should handle signature failures more gracefully.
return pdu
- @defer.inlineCallbacks
- def _do_send_invite(self, destination, pdu, room_version):
+ async def _do_send_invite(
+ self, destination: str, pdu: EventBase, room_version: RoomVersion
+ ) -> JsonDict:
"""Actually sends the invite, first trying v2 API and falling back to
v1 API if necessary.
- Args:
- destination (str): Target server
- pdu (FrozenEvent)
- room_version (str)
-
Returns:
- dict: The event as a dict as returned by the remote server
+ The event as a dict as returned by the remote server
"""
time_now = self._clock.time_msec()
try:
- content = yield self.transport_layer.send_invite_v2(
+ content = await self.transport_layer.send_invite_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content={
"event": pdu.get_pdu_json(time_now),
- "room_version": room_version,
+ "room_version": room_version.identifier,
"invite_room_state": pdu.unsigned.get("invite_room_state", []),
},
)
@@ -707,8 +720,7 @@ class FederationClient(FederationBase):
# Otherwise, we assume that the remote server doesn't understand
# the v2 invite API. That's ok provided the room uses old-style event
# IDs.
- v = KNOWN_ROOM_VERSIONS.get(room_version)
- if v.event_format != EventFormatVersions.V1:
+ if room_version.event_format != EventFormatVersions.V1:
raise SynapseError(
400,
"User's homeserver does not support this room version",
@@ -722,7 +734,7 @@ class FederationClient(FederationBase):
# Didn't work, try v1 API.
# Note the v1 API returns a tuple of `(200, content)`
- _, content = yield self.transport_layer.send_invite_v1(
+ _, content = await self.transport_layer.send_invite_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -730,7 +742,7 @@ class FederationClient(FederationBase):
)
return content
- def send_leave(self, destinations, pdu):
+ async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None:
"""Sends a leave event to one of a list of homeservers.
Doing so will cause the remote server to add the event to the graph,
@@ -739,34 +751,29 @@ class FederationClient(FederationBase):
This is mostly useful to reject received invites.
Args:
- destinations (str): Candidate homeservers which are probably
+ destinations: Candidate homeservers which are probably
participating in the room.
- pdu (BaseEvent): event to be sent
+ pdu: event to be sent
- Return:
- Deferred: resolves to None.
-
- Fails with a ``SynapseError`` if the chosen remote server
- returns a 300/400 code.
+ Raises:
+ SynapseError: if the chosen remote server returns a 300/400 code.
- Fails with a ``RuntimeError`` if no servers were reachable.
+ RuntimeError: if no servers were reachable.
"""
- @defer.inlineCallbacks
- def send_request(destination):
- content = yield self._do_send_leave(destination, pdu)
-
+ async def send_request(destination: str) -> None:
+ content = await self._do_send_leave(destination, pdu)
logger.debug("Got content: %s", content)
- return None
- return self._try_destination_list("send_leave", destinations, send_request)
+ return await self._try_destination_list(
+ "send_leave", destinations, send_request
+ )
- @defer.inlineCallbacks
- def _do_send_leave(self, destination, pdu):
+ async def _do_send_leave(self, destination, pdu):
time_now = self._clock.time_msec()
try:
- content = yield self.transport_layer.send_leave_v2(
+ content = await self.transport_layer.send_leave_v2(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -788,7 +795,7 @@ class FederationClient(FederationBase):
logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API")
- resp = yield self.transport_layer.send_leave_v1(
+ resp = await self.transport_layer.send_leave_v1(
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -820,34 +827,33 @@ class FederationClient(FederationBase):
third_party_instance_id=third_party_instance_id,
)
- @defer.inlineCallbacks
- def get_missing_events(
+ async def get_missing_events(
self,
- destination,
- room_id,
- earliest_events_ids,
- latest_events,
- limit,
- min_depth,
- timeout,
- ):
+ destination: str,
+ room_id: str,
+ earliest_events_ids: Sequence[str],
+ latest_events: Iterable[EventBase],
+ limit: int,
+ min_depth: int,
+ timeout: int,
+ ) -> List[EventBase]:
"""Tries to fetch events we are missing. This is called when we receive
an event without having received all of its ancestors.
Args:
- destination (str)
- room_id (str)
- earliest_events_ids (list): List of event ids. Effectively the
+ destination
+ room_id
+ earliest_events_ids: List of event ids. Effectively the
events we expected to receive, but haven't. `get_missing_events`
should only return events that didn't happen before these.
- latest_events (list): List of events we have received that we don't
+ latest_events: List of events we have received that we don't
have all previous events for.
- limit (int): Maximum number of events to return.
- min_depth (int): Minimum depth of events tor return.
- timeout (int): Max time to wait in ms
+ limit: Maximum number of events to return.
+ min_depth: Minimum depth of events to return.
+ timeout: Max time to wait in ms
"""
try:
- content = yield self.transport_layer.get_missing_events(
+ content = await self.transport_layer.get_missing_events(
destination=destination,
room_id=room_id,
earliest_events=earliest_events_ids,
@@ -857,15 +863,14 @@ class FederationClient(FederationBase):
timeout=timeout,
)
- room_version = yield self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
+ room_version = await self.store.get_room_version(room_id)
events = [
- event_from_pdu_json(e, format_ver) for e in content.get("events", [])
+ event_from_pdu_json(e, room_version) for e in content.get("events", [])
]
- signed_events = yield self._check_sigs_and_hash_and_fetch(
- destination, events, outlier=False, room_version=room_version
+ signed_events = await self._check_sigs_and_hash_and_fetch(
+ destination, events, outlier=False, room_version=room_version.identifier
)
except HttpResponseException as e:
if not e.code == 400:
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index a4c97ed458..7f9da49326 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -38,7 +38,6 @@ from synapse.api.errors import (
UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
@@ -54,7 +53,7 @@ from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
)
-from synapse.types import get_domain_from_id
+from synapse.types import JsonDict, get_domain_from_id
from synapse.util import glob_to_regex, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
@@ -82,6 +81,8 @@ class FederationServer(FederationBase):
self.handler = hs.get_handlers().federation_handler
self.state = hs.get_state_handler()
+ self.device_handler = hs.get_device_handler()
+
self._server_linearizer = Linearizer("fed_server")
self._transaction_linearizer = Linearizer("fed_txn_handler")
@@ -234,24 +235,17 @@ class FederationServer(FederationBase):
continue
try:
- room_version = await self.store.get_room_version_id(room_id)
+ room_version = await self.store.get_room_version(room_id)
except NotFoundError:
logger.info("Ignoring PDU for unknown room_id: %s", room_id)
continue
-
- try:
- format_ver = room_version_to_event_format(room_version)
- except UnsupportedRoomVersionError:
+ except UnsupportedRoomVersionError as e:
# this can happen if support for a given room version is withdrawn,
# so that we still get events for said room.
- logger.info(
- "Ignoring PDU for room %s with unknown version %s",
- room_id,
- room_version,
- )
+ logger.info("Ignoring PDU: %s", e)
continue
- event = event_from_pdu_json(p, format_ver)
+ event = event_from_pdu_json(p, room_version)
pdus_by_room.setdefault(room_id, []).append(event)
pdu_results = {}
@@ -302,7 +296,12 @@ class FederationServer(FederationBase):
async def _process_edu(edu_dict):
received_edus_counter.inc()
- edu = Edu(**edu_dict)
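+ # Build the Edu from explicitly-named fields rather than from **edu_dict,
+ # so that a remote server cannot spoof `origin` or `destination`.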
+ edu = Edu(
+ origin=origin,
+ destination=self.server_name,
+ edu_type=edu_dict["edu_type"],
+ content=edu_dict["content"],
+ )
await self.registry.on_edu(edu.edu_type, origin, edu.content)
await concurrently_execute(
@@ -396,20 +395,21 @@ class FederationServer(FederationBase):
time_now = self._clock.time_msec()
return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
- async def on_invite_request(self, origin, content, room_version):
- if room_version not in KNOWN_ROOM_VERSIONS:
+ async def on_invite_request(
+ self, origin: str, content: JsonDict, room_version_id: str
+ ):
+ room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
+ if not room_version:
raise SynapseError(
400,
"Homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
- format_ver = room_version_to_event_format(room_version)
-
- pdu = event_from_pdu_json(content, format_ver)
+ pdu = event_from_pdu_json(content, room_version)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
- pdu = await self._check_sigs_and_hash(room_version, pdu)
+ pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
@@ -417,16 +417,15 @@ class FederationServer(FederationBase):
async def on_send_join_request(self, origin, content, room_id):
logger.debug("on_send_join_request: content: %s", content)
- room_version = await self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
- pdu = event_from_pdu_json(content, format_ver)
+ room_version = await self.store.get_room_version(room_id)
+ pdu = event_from_pdu_json(content, room_version)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
- pdu = await self._check_sigs_and_hash(room_version, pdu)
+ pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
res_pdus = await self.handler.on_send_join_request(origin, pdu)
time_now = self._clock.time_msec()
@@ -448,16 +447,15 @@ class FederationServer(FederationBase):
async def on_send_leave_request(self, origin, content, room_id):
logger.debug("on_send_leave_request: content: %s", content)
- room_version = await self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
- pdu = event_from_pdu_json(content, format_ver)
+ room_version = await self.store.get_room_version(room_id)
+ pdu = event_from_pdu_json(content, room_version)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
- pdu = await self._check_sigs_and_hash(room_version, pdu)
+ pdu = await self._check_sigs_and_hash(room_version.identifier, pdu)
await self.handler.on_send_leave_request(origin, pdu)
return {}
@@ -495,15 +493,14 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- room_version = await self.store.get_room_version_id(room_id)
- format_ver = room_version_to_event_format(room_version)
+ room_version = await self.store.get_room_version(room_id)
auth_chain = [
- event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
+ event_from_pdu_json(e, room_version) for e in content["auth_chain"]
]
signed_auth = await self._check_sigs_and_hash_and_fetch(
- origin, auth_chain, outlier=True, room_version=room_version
+ origin, auth_chain, outlier=True, room_version=room_version.identifier
)
ret = await self.handler.on_query_auth(
@@ -528,8 +525,9 @@ class FederationServer(FederationBase):
def on_query_client_keys(self, origin, content):
return self.on_query_request("client_keys", content)
- def on_query_user_devices(self, origin, user_id):
- return self.on_query_request("user_devices", user_id)
+ async def on_query_user_devices(self, origin: str, user_id: str):
+ keys = await self.device_handler.on_federation_query_user_devices(user_id)
+ return 200, keys
@trace
async def on_claim_client_keys(self, origin, content):
@@ -570,7 +568,7 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
- logger.info(
+ logger.debug(
"on_get_missing_events: earliest_events: %r, latest_events: %r,"
" limit: %d",
earliest_events,
@@ -583,11 +581,11 @@ class FederationServer(FederationBase):
)
if len(missing_events) < 5:
- logger.info(
+ logger.debug(
"Returning %d events: %r", len(missing_events), missing_events
)
else:
- logger.info("Returning %d events", len(missing_events))
+ logger.debug("Returning %d events", len(missing_events))
time_now = self._clock.time_msec()
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 36c83c3027..233cb33daf 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -14,6 +14,7 @@
# limitations under the License.
import logging
+from typing import Dict, Hashable, Iterable, List, Optional, Set
from six import itervalues
@@ -23,6 +24,7 @@ from twisted.internet import defer
import synapse
import synapse.metrics
+from synapse.events import EventBase
from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
@@ -39,6 +41,8 @@ from synapse.metrics import (
events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.storage.presence import UserPresenceState
+from synapse.types import ReadReceipt
from synapse.util.metrics import Measure, measure_func
logger = logging.getLogger(__name__)
@@ -68,7 +72,7 @@ class FederationSender(object):
self._transaction_manager = TransactionManager(hs)
# map from destination to PerDestinationQueue
- self._per_destination_queues = {} # type: dict[str, PerDestinationQueue]
+ self._per_destination_queues = {} # type: Dict[str, PerDestinationQueue]
LaterGauge(
"synapse_federation_transaction_queue_pending_destinations",
@@ -84,7 +88,7 @@ class FederationSender(object):
# Map of user_id -> UserPresenceState for all the pending presence
# to be sent out by user_id. Entries here get processed and put in
# pending_presence_by_dest
- self.pending_presence = {}
+ self.pending_presence = {} # type: Dict[str, UserPresenceState]
LaterGauge(
"synapse_federation_transaction_queue_pending_pdus",
@@ -116,20 +120,17 @@ class FederationSender(object):
# and that there is a pending call to _flush_rrs_for_room in the system.
self._queues_awaiting_rr_flush_by_room = (
{}
- ) # type: dict[str, set[PerDestinationQueue]]
+ ) # type: Dict[str, Set[PerDestinationQueue]]
self._rr_txn_interval_per_room_ms = (
- 1000.0 / hs.get_config().federation_rr_transactions_per_room_per_second
+ 1000.0 / hs.config.federation_rr_transactions_per_room_per_second
)
- def _get_per_destination_queue(self, destination):
+ def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
"""Get or create a PerDestinationQueue for the given destination
Args:
- destination (str): server_name of remote server
-
- Returns:
- PerDestinationQueue
+ destination: server_name of remote server
"""
queue = self._per_destination_queues.get(destination)
if not queue:
@@ -137,7 +138,7 @@ class FederationSender(object):
self._per_destination_queues[destination] = queue
return queue
- def notify_new_events(self, current_id):
+ def notify_new_events(self, current_id: int) -> None:
"""This gets called when we have some new events we might want to
send out to other servers.
"""
@@ -151,13 +152,12 @@ class FederationSender(object):
"process_event_queue_for_federation", self._process_event_queue_loop
)
- @defer.inlineCallbacks
- def _process_event_queue_loop(self):
+ async def _process_event_queue_loop(self) -> None:
try:
self._is_processing = True
while True:
- last_token = yield self.store.get_federation_out_pos("events")
- next_token, events = yield self.store.get_all_new_events_stream(
+ last_token = await self.store.get_federation_out_pos("events")
+ next_token, events = await self.store.get_all_new_events_stream(
last_token, self._last_poked_id, limit=100
)
@@ -166,8 +166,7 @@ class FederationSender(object):
if not events and next_token >= self._last_poked_id:
break
- @defer.inlineCallbacks
- def handle_event(event):
+ async def handle_event(event: EventBase) -> None:
# Only send events for this server.
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
is_mine = self.is_mine_id(event.sender)
@@ -184,7 +183,7 @@ class FederationSender(object):
# Otherwise if the last member on a server in a room is
# banned then it won't receive the event because it won't
# be in the room after the ban.
- destinations = yield self.state.get_hosts_in_room_at_events(
+ destinations = await self.state.get_hosts_in_room_at_events(
event.room_id, event_ids=event.prev_event_ids()
)
except Exception:
@@ -206,17 +205,16 @@ class FederationSender(object):
self._send_pdu(event, destinations)
- @defer.inlineCallbacks
- def handle_room_events(events):
+ async def handle_room_events(events: Iterable[EventBase]) -> None:
with Measure(self.clock, "handle_room_events"):
for event in events:
- yield handle_event(event)
+ await handle_event(event)
- events_by_room = {}
+ events_by_room = {} # type: Dict[str, List[EventBase]]
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
- yield make_deferred_yieldable(
+ await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(handle_room_events, evs)
@@ -226,11 +224,11 @@ class FederationSender(object):
)
)
- yield self.store.update_federation_out_pos("events", next_token)
+ await self.store.update_federation_out_pos("events", next_token)
if events:
now = self.clock.time_msec()
- ts = yield self.store.get_received_ts(events[-1].event_id)
+ ts = await self.store.get_received_ts(events[-1].event_id)
synapse.metrics.event_processing_lag.labels(
"federation_sender"
@@ -254,7 +252,7 @@ class FederationSender(object):
finally:
self._is_processing = False
- def _send_pdu(self, pdu, destinations):
+ def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
# We loop through all destinations to see whether we already have
# a transaction in progress. If we do, stick it in the pending_pdus
# table and we'll get back to it later.
@@ -276,11 +274,11 @@ class FederationSender(object):
self._get_per_destination_queue(destination).send_pdu(pdu, order)
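+ # Each destination has its own PerDestinationQueue, so a slow or
+ # unreachable remote only delays its own transactions, not everyone else's.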
@defer.inlineCallbacks
- def send_read_receipt(self, receipt):
+ def send_read_receipt(self, receipt: ReadReceipt):
"""Send a RR to any other servers in the room
Args:
- receipt (synapse.types.ReadReceipt): receipt to be sent
+ receipt: receipt to be sent
"""
# Some background on the rate-limiting going on here.
@@ -343,7 +341,7 @@ class FederationSender(object):
else:
queue.flush_read_receipts_for_room(room_id)
- def _schedule_rr_flush_for_room(self, room_id, n_domains):
+ def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
# that is going to cause approximately len(domains) transactions, so now back
# off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
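+ # e.g. at 20ms per room-transaction, a room spanning 10 remote domains
+ # backs off for 10 * 20 = 200ms before the next flush.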
@@ -352,7 +350,7 @@ class FederationSender(object):
self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
self._queues_awaiting_rr_flush_by_room[room_id] = set()
- def _flush_rrs_for_room(self, room_id):
+ def _flush_rrs_for_room(self, room_id: str) -> None:
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
logger.debug("Flushing RRs in %s to %s", room_id, queues)
@@ -368,14 +366,11 @@ class FederationSender(object):
@preserve_fn # the caller should not yield on this
@defer.inlineCallbacks
- def send_presence(self, states):
+ def send_presence(self, states: List[UserPresenceState]):
"""Send the new presence states to the appropriate destinations.
This actually queues up the presence states ready for sending and
triggers a background task to process them and send out the transactions.
-
- Args:
- states (list(UserPresenceState))
"""
if not self.hs.config.use_presence:
# No-op if presence is disabled.
@@ -412,11 +407,10 @@ class FederationSender(object):
finally:
self._processing_pending_presence = False
- def send_presence_to_destinations(self, states, destinations):
+ def send_presence_to_destinations(
+ self, states: List[UserPresenceState], destinations: List[str]
+ ) -> None:
"""Send the given presence states to the given destinations.
-
- Args:
- states (list[UserPresenceState])
- destinations (list[str])
"""
@@ -431,12 +425,9 @@ class FederationSender(object):
@measure_func("txnqueue._process_presence")
@defer.inlineCallbacks
- def _process_presence_inner(self, states):
+ def _process_presence_inner(self, states: List[UserPresenceState]):
"""Given a list of states populate self.pending_presence_by_dest and
poke to send a new transaction to each destination
-
- Args:
- states (list(UserPresenceState))
"""
hosts_and_states = yield get_interested_remotes(self.store, states, self.state)
@@ -446,14 +437,20 @@ class FederationSender(object):
continue
self._get_per_destination_queue(destination).send_presence(states)
- def build_and_send_edu(self, destination, edu_type, content, key=None):
+ def build_and_send_edu(
+ self,
+ destination: str,
+ edu_type: str,
+ content: dict,
+ key: Optional[Hashable] = None,
+ ) -> None:
"""Construct an Edu object, and queue it for sending
Args:
- destination (str): name of server to send to
- edu_type (str): type of EDU to send
- content (dict): content of EDU
- key (Any|None): clobbering key for this edu
+ destination: name of server to send to
+ edu_type: type of EDU to send
+ content: content of EDU
+ key: clobbering key for this edu
"""
if destination == self.server_name:
logger.info("Not sending EDU to ourselves")
@@ -468,12 +465,12 @@ class FederationSender(object):
self.send_edu(edu, key)
- def send_edu(self, edu, key):
+ def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None:
"""Queue an EDU for sending
Args:
- edu (Edu): edu to send
- key (Any|None): clobbering key for this edu
+ edu: edu to send
+ key: clobbering key for this edu
"""
queue = self._get_per_destination_queue(edu.destination)
if key:
@@ -481,7 +478,7 @@ class FederationSender(object):
else:
queue.send_edu(edu)
- def send_device_messages(self, destination):
+ def send_device_messages(self, destination: str) -> None:
if destination == self.server_name:
logger.warning("Not sending device update to ourselves")
return
@@ -501,5 +498,5 @@ class FederationSender(object):
self._get_per_destination_queue(destination).attempt_new_transaction()
- def get_current_token(self):
+ def get_current_token(self) -> int:
return 0
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 5012aaea35..e13cd20ffa 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -15,11 +15,11 @@
# limitations under the License.
import datetime
import logging
+from typing import Dict, Hashable, Iterable, List, Tuple
from prometheus_client import Counter
-from twisted.internet import defer
-
+import synapse.server
from synapse.api.errors import (
FederationDeniedError,
HttpResponseException,
@@ -31,7 +31,7 @@ from synapse.handlers.presence import format_user_presence_state
from synapse.metrics import sent_transactions_counter
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
-from synapse.types import StateMap
+from synapse.types import ReadReceipt
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
# This is defined in the Matrix spec and enforced by the receiver.
@@ -56,13 +56,18 @@ class PerDestinationQueue(object):
Manages the per-destination transmission queues.
Args:
- hs (synapse.HomeServer):
- transaction_sender (TransactionManager):
- destination (str): the server_name of the destination that we are managing
+ hs
+ transaction_manager: the TransactionManager (shared between
+ PerDestinationQueues) used to send transactions
+ destination: the server_name of the destination that we are managing
transmission for.
"""
- def __init__(self, hs, transaction_manager, destination):
+ def __init__(
+ self,
+ hs: "synapse.server.HomeServer",
+ transaction_manager: "synapse.federation.sender.TransactionManager",
+ destination: str,
+ ):
self._server_name = hs.hostname
self._clock = hs.get_clock()
self._store = hs.get_datastore()
@@ -72,20 +77,20 @@ class PerDestinationQueue(object):
self.transmission_loop_running = False
# a list of tuples of (pending pdu, order)
- self._pending_pdus = [] # type: list[tuple[EventBase, int]]
- self._pending_edus = [] # type: list[Edu]
+ self._pending_pdus = [] # type: List[Tuple[EventBase, int]]
+ self._pending_edus = [] # type: List[Edu]
# Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered
# based on their key (e.g. typing events by room_id)
# Map of (edu_type, key) -> Edu
- self._pending_edus_keyed = {} # type: StateMap[Edu]
+ self._pending_edus_keyed = {} # type: Dict[Tuple[str, Hashable], Edu]
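+ # e.g. two queued typing notifications for the same room share the key
+ # ("m.typing", room_id), so the second clobbers the first and only the
+ # latest typing state goes out.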
# Map of user_id -> UserPresenceState of pending presence to be sent to this
# destination
- self._pending_presence = {} # type: dict[str, UserPresenceState]
+ self._pending_presence = {} # type: Dict[str, UserPresenceState]
# room_id -> receipt_type -> user_id -> receipt_dict
- self._pending_rrs = {}
+ self._pending_rrs = {} # type: Dict[str, Dict[str, Dict[str, dict]]]
self._rrs_pending_flush = False
# stream_id of last successfully sent to-device message.
@@ -95,50 +100,50 @@ class PerDestinationQueue(object):
# stream_id of last successfully sent device list update.
self._last_device_list_stream_id = 0
- def __str__(self):
+ def __str__(self) -> str:
return "PerDestinationQueue[%s]" % self._destination
- def pending_pdu_count(self):
+ def pending_pdu_count(self) -> int:
return len(self._pending_pdus)
- def pending_edu_count(self):
+ def pending_edu_count(self) -> int:
return (
len(self._pending_edus)
+ len(self._pending_presence)
+ len(self._pending_edus_keyed)
)
- def send_pdu(self, pdu, order):
+ def send_pdu(self, pdu: EventBase, order: int) -> None:
"""Add a PDU to the queue, and start the transmission loop if neccessary
Args:
- pdu (EventBase): pdu to send
- order (int):
+ pdu: pdu to send
+ order
"""
self._pending_pdus.append((pdu, order))
self.attempt_new_transaction()
- def send_presence(self, states):
+ def send_presence(self, states: Iterable[UserPresenceState]) -> None:
"""Add presence updates to the queue. Start the transmission loop if neccessary.
Args:
- states (iterable[UserPresenceState]): presence to send
+ states: presence to send
"""
self._pending_presence.update({state.user_id: state for state in states})
self.attempt_new_transaction()
- def queue_read_receipt(self, receipt):
+ def queue_read_receipt(self, receipt: ReadReceipt) -> None:
"""Add a RR to the list to be sent. Doesn't start the transmission loop yet
(see flush_read_receipts_for_room)
Args:
- receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued
+ receipt: receipt to be queued
"""
self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
receipt.receipt_type, {}
)[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
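+ # e.g. after queueing one receipt this holds something like:
+ # {room_id: {"m.read": {user_id: {"event_ids": [...], "data": {...}}}}}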
- def flush_read_receipts_for_room(self, room_id):
+ def flush_read_receipts_for_room(self, room_id: str) -> None:
# if we don't have any read-receipts for this room, it may be that we've already
# sent them out, so we don't need to flush.
if room_id not in self._pending_rrs:
@@ -146,15 +151,15 @@ class PerDestinationQueue(object):
self._rrs_pending_flush = True
self.attempt_new_transaction()
- def send_keyed_edu(self, edu, key):
+ def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
self._pending_edus_keyed[(edu.edu_type, key)] = edu
self.attempt_new_transaction()
- def send_edu(self, edu):
+ def send_edu(self, edu: Edu) -> None:
self._pending_edus.append(edu)
self.attempt_new_transaction()
- def attempt_new_transaction(self):
+ def attempt_new_transaction(self) -> None:
"""Try to start a new transaction to this destination
If there is already a transaction in progress to this destination,
@@ -177,23 +182,22 @@ class PerDestinationQueue(object):
self._transaction_transmission_loop,
)
- @defer.inlineCallbacks
- def _transaction_transmission_loop(self):
- pending_pdus = []
+ async def _transaction_transmission_loop(self) -> None:
+ pending_pdus = [] # type: List[Tuple[EventBase, int]]
try:
self.transmission_loop_running = True
# This will throw if we wouldn't retry. We do this here so we fail
# quickly, but we will later check this again in the http client,
# hence why we throw the result away.
- yield get_retry_limiter(self._destination, self._clock, self._store)
+ await get_retry_limiter(self._destination, self._clock, self._store)
pending_pdus = []
while True:
# We have to keep 2 free slots for presence and rr_edus
limit = MAX_EDUS_PER_TRANSACTION - 2
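+ # e.g. if MAX_EDUS_PER_TRANSACTION is 100, device-list and to-device
+ # EDUs can fill at most 98 slots; one slot each is held back for the
+ # presence EDU and the read-receipt EDU added below.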
- device_update_edus, dev_list_id = yield self._get_device_update_edus(
+ device_update_edus, dev_list_id = await self._get_device_update_edus(
limit
)
@@ -202,7 +206,7 @@ class PerDestinationQueue(object):
(
to_device_edus,
device_stream_id,
- ) = yield self._get_to_device_message_edus(limit)
+ ) = await self._get_to_device_message_edus(limit)
pending_edus = device_update_edus + to_device_edus
@@ -269,7 +273,7 @@ class PerDestinationQueue(object):
# END CRITICAL SECTION
- success = yield self._transaction_manager.send_new_transaction(
+ success = await self._transaction_manager.send_new_transaction(
self._destination, pending_pdus, pending_edus
)
if success:
@@ -280,7 +284,7 @@ class PerDestinationQueue(object):
# Remove the acknowledged device messages from the database
# Only bother if we actually sent some device messages
if to_device_edus:
- yield self._store.delete_device_msgs_for_remote(
+ await self._store.delete_device_msgs_for_remote(
self._destination, device_stream_id
)
@@ -289,7 +293,7 @@ class PerDestinationQueue(object):
logger.info(
"Marking as sent %r %r", self._destination, dev_list_id
)
- yield self._store.mark_as_sent_devices_by_remote(
+ await self._store.mark_as_sent_devices_by_remote(
self._destination, dev_list_id
)
@@ -334,7 +338,7 @@ class PerDestinationQueue(object):
# We want to be *very* sure we clear this after we stop processing
self.transmission_loop_running = False
- def _get_rr_edus(self, force_flush):
+ def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
if not self._pending_rrs:
return
if not force_flush and not self._rrs_pending_flush:
@@ -351,17 +355,16 @@ class PerDestinationQueue(object):
self._rrs_pending_flush = False
yield edu
- def _pop_pending_edus(self, limit):
+ def _pop_pending_edus(self, limit: int) -> List[Edu]:
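+ # e.g. with limit=3 and five queued EDUs, this returns the first three
+ # and leaves the last two queued for a later transaction.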
pending_edus = self._pending_edus
pending_edus, self._pending_edus = pending_edus[:limit], pending_edus[limit:]
return pending_edus
- @defer.inlineCallbacks
- def _get_device_update_edus(self, limit):
+ async def _get_device_update_edus(self, limit: int) -> Tuple[List[Edu], int]:
last_device_list = self._last_device_list_stream_id
# Retrieve list of new device updates to send to the destination
- now_stream_id, results = yield self._store.get_device_updates_by_remote(
+ now_stream_id, results = await self._store.get_device_updates_by_remote(
self._destination, last_device_list, limit=limit
)
edus = [
@@ -378,11 +381,10 @@ class PerDestinationQueue(object):
return (edus, now_stream_id)
- @defer.inlineCallbacks
- def _get_to_device_message_edus(self, limit):
+ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
last_device_stream_id = self._last_device_stream_id
to_device_stream_id = self._store.get_to_device_stream_token()
- contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
+ contents, stream_id = await self._store.get_new_device_msgs_for_remote(
self._destination, last_device_stream_id, to_device_stream_id, limit
)
edus = [
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 5fed626d5b..3c2a02a3b3 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+from typing import List
from canonicaljson import json
-from twisted.internet import defer
-
+import synapse.server
from synapse.api.errors import HttpResponseException
+from synapse.events import EventBase
from synapse.federation.persistence import TransactionActions
-from synapse.federation.units import Transaction
+from synapse.federation.units import Edu, Transaction
from synapse.logging.opentracing import (
extract_text_map,
set_tag,
@@ -39,7 +40,7 @@ class TransactionManager(object):
shared between PerDestinationQueue objects
"""
- def __init__(self, hs):
+ def __init__(self, hs: "synapse.server.HomeServer"):
self._server_name = hs.hostname
self.clock = hs.get_clock() # nb must be called this for @measure_func
self._store = hs.get_datastore()
@@ -50,8 +51,9 @@ class TransactionManager(object):
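+ # (seeding from the clock keeps transaction IDs increasing across
+ # restarts without having to persist a counter)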
self._next_txn_id = int(self.clock.time_msec())
@measure_func("_send_new_transaction")
- @defer.inlineCallbacks
- def send_new_transaction(self, destination, pending_pdus, pending_edus):
+ async def send_new_transaction(
+ self, destination: str, pending_pdus: List[EventBase], pending_edus: List[Edu]
+ ):
# Make a transaction-sending opentracing span. This span follows on from
# all the edus in that transaction. This needs to be done since there is
@@ -127,7 +129,7 @@ class TransactionManager(object):
return data
try:
- response = yield self._transport_layer.send_transaction(
+ response = await self._transport_layer.send_transaction(
transaction, json_data_cb
)
code = 200
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 125eadd796..92a9ae2320 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -158,7 +158,7 @@ class Authenticator(object):
origin, json_request, now, "Incoming request"
)
- logger.info("Request from %s", origin)
+ logger.debug("Request from %s", origin)
request.authenticated_entity = origin
# If we get a valid signed request from the other side, it's probably
@@ -579,7 +579,7 @@ class FederationV1InviteServlet(BaseFederationServlet):
# state resolution algorithm, and we don't use that for processing
# invites
content = await self.handler.on_invite_request(
- origin, content, room_version=RoomVersions.V1.identifier
+ origin, content, room_version_id=RoomVersions.V1.identifier
)
# V1 federation API is defined to return a content of `[200, {...}]`
@@ -606,7 +606,7 @@ class FederationV2InviteServlet(BaseFederationServlet):
event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state
content = await self.handler.on_invite_request(
- origin, event, room_version=room_version
+ origin, event, room_version_id=room_version
)
return 200, content
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index b4d743cde7..6b32e0dcbf 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -19,11 +19,15 @@ server protocol.
import logging
+import attr
+
+from synapse.types import JsonDict
from synapse.util.jsonobject import JsonEncodedObject
logger = logging.getLogger(__name__)
+@attr.s(slots=True)
class Edu(JsonEncodedObject):
""" An Edu represents a piece of data sent from one homeserver to another.
@@ -32,11 +36,24 @@ class Edu(JsonEncodedObject):
internal ID or previous references graph.
"""
- valid_keys = ["origin", "destination", "edu_type", "content"]
+ edu_type = attr.ib(type=str)
+ content = attr.ib(type=dict)
+ origin = attr.ib(type=str)
+ destination = attr.ib(type=str)
- required_keys = ["edu_type"]
+ def get_dict(self) -> JsonDict:
+ return {
+ "edu_type": self.edu_type,
+ "content": self.content,
+ }
- internal_keys = ["origin", "destination"]
+ def get_internal_dict(self) -> JsonDict:
+ return {
+ "edu_type": self.edu_type,
+ "content": self.content,
+ "origin": self.origin,
+ "destination": self.destination,
+ }
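+
+ # For illustration: Edu(edu_type="m.typing", content={...}, origin="hs1",
+ # destination="hs2").get_dict() returns just the spec-visible fields
+ # ({"edu_type": ..., "content": ...}); origin and destination are routing
+ # metadata, exposed via get_internal_dict() for local use.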
def get_context(self):
return getattr(self, "content", {}).get("org.matrix.opentracing_context", "{}")
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 0ec9be3cb5..c106abae21 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -36,7 +36,7 @@ logger = logging.getLogger(__name__)
# TODO: Flairs
-class GroupsServerHandler(object):
+class GroupsServerWorkerHandler(object):
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
@@ -51,9 +51,6 @@ class GroupsServerHandler(object):
self.transport_client = hs.get_federation_transport_client()
self.profile_handler = hs.get_profile_handler()
- # Ensure attestations get renewed
- hs.get_groups_attestation_renewer()
-
@defer.inlineCallbacks
def check_group_is_ours(
self, group_id, requester_user_id, and_exists=False, and_is_admin=None
@@ -168,6 +165,197 @@ class GroupsServerHandler(object):
}
@defer.inlineCallbacks
+ def get_group_categories(self, group_id, requester_user_id):
+ """Get all categories in a group (as seen by user)
+ """
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ categories = yield self.store.get_group_categories(group_id=group_id)
+ return {"categories": categories}
+
+ @defer.inlineCallbacks
+ def get_group_category(self, group_id, requester_user_id, category_id):
+ """Get a specific category in a group (as seen by user)
+ """
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ res = yield self.store.get_group_category(
+ group_id=group_id, category_id=category_id
+ )
+
+ logger.info("group %s", res)
+
+ return res
+
+ @defer.inlineCallbacks
+ def get_group_roles(self, group_id, requester_user_id):
+ """Get all roles in a group (as seen by user)
+ """
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ roles = yield self.store.get_group_roles(group_id=group_id)
+ return {"roles": roles}
+
+ @defer.inlineCallbacks
+ def get_group_role(self, group_id, requester_user_id, role_id):
+ """Get a specific role in a group (as seen by user)
+ """
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
+ return res
+
+ @defer.inlineCallbacks
+ def get_group_profile(self, group_id, requester_user_id):
+ """Get the group profile as seen by requester_user_id
+ """
+
+ yield self.check_group_is_ours(group_id, requester_user_id)
+
+ group = yield self.store.get_group(group_id)
+
+ if group:
+ cols = [
+ "name",
+ "short_description",
+ "long_description",
+ "avatar_url",
+ "is_public",
+ ]
+ group_description = {key: group[key] for key in cols}
+ group_description["is_openly_joinable"] = group["join_policy"] == "open"
+
+ return group_description
+ else:
+ raise SynapseError(404, "Unknown group")
+
+ @defer.inlineCallbacks
+ def get_users_in_group(self, group_id, requester_user_id):
+ """Get the users in group as seen by requester_user_id.
+
+ The ordering is arbitrary at the moment
+ """
+
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ is_user_in_group = yield self.store.is_user_in_group(
+ requester_user_id, group_id
+ )
+
+ user_results = yield self.store.get_users_in_group(
+ group_id, include_private=is_user_in_group
+ )
+
+ chunk = []
+ for user_result in user_results:
+ g_user_id = user_result["user_id"]
+ is_public = user_result["is_public"]
+ is_privileged = user_result["is_admin"]
+
+ entry = {"user_id": g_user_id}
+
+ profile = yield self.profile_handler.get_profile_from_cache(g_user_id)
+ entry.update(profile)
+
+ entry["is_public"] = bool(is_public)
+ entry["is_privileged"] = bool(is_privileged)
+
+ if not self.is_mine_id(g_user_id):
+ attestation = yield self.store.get_remote_attestation(
+ group_id, g_user_id
+ )
+ if not attestation:
+ continue
+
+ entry["attestation"] = attestation
+ else:
+ entry["attestation"] = self.attestations.create_attestation(
+ group_id, g_user_id
+ )
+
+ chunk.append(entry)
+
+ # TODO: If admin add lists of users whose attestations have timed out
+
+ return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
+
+ @defer.inlineCallbacks
+ def get_invited_users_in_group(self, group_id, requester_user_id):
+ """Get the users that have been invited to a group as seen by requester_user_id.
+
+ The ordering is arbitrary at the moment
+ """
+
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ is_user_in_group = yield self.store.is_user_in_group(
+ requester_user_id, group_id
+ )
+
+ if not is_user_in_group:
+ raise SynapseError(403, "User not in group")
+
+ invited_users = yield self.store.get_invited_users_in_group(group_id)
+
+ user_profiles = []
+
+ for user_id in invited_users:
+ user_profile = {"user_id": user_id}
+ try:
+ profile = yield self.profile_handler.get_profile_from_cache(user_id)
+ user_profile.update(profile)
+ except Exception as e:
+ logger.warning("Error getting profile for %s: %s", user_id, e)
+ user_profiles.append(user_profile)
+
+ return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
+
+ @defer.inlineCallbacks
+ def get_rooms_in_group(self, group_id, requester_user_id):
+ """Get the rooms in group as seen by requester_user_id
+
+ This returns rooms in order of decreasing number of joined users
+ """
+
+ yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
+
+ is_user_in_group = yield self.store.is_user_in_group(
+ requester_user_id, group_id
+ )
+
+ room_results = yield self.store.get_rooms_in_group(
+ group_id, include_private=is_user_in_group
+ )
+
+ chunk = []
+ for room_result in room_results:
+ room_id = room_result["room_id"]
+
+ joined_users = yield self.store.get_users_in_room(room_id)
+ entry = yield self.room_list_handler.generate_room_entry(
+ room_id, len(joined_users), with_alias=False, allow_private=True
+ )
+
+ if not entry:
+ continue
+
+ entry["is_public"] = bool(room_result["is_public"])
+
+ chunk.append(entry)
+
+ chunk.sort(key=lambda e: -e["num_joined_members"])
+
+ return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
+
+
+class GroupsServerHandler(GroupsServerWorkerHandler):
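+ # The full (master-only) handler: adds the mutating operations on top of
+ # the read-only GroupsServerWorkerHandler above, and is the only place
+ # that starts the attestation-renewal background task, so workers never
+ # run it.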
+ def __init__(self, hs):
+ super(GroupsServerHandler, self).__init__(hs)
+
+ # Ensure attestations get renewed
+ hs.get_groups_attestation_renewer()
+
+ @defer.inlineCallbacks
def update_group_summary_room(
self, group_id, requester_user_id, room_id, category_id, content
):
@@ -230,27 +418,6 @@ class GroupsServerHandler(object):
return {}
@defer.inlineCallbacks
- def get_group_categories(self, group_id, requester_user_id):
- """Get all categories in a group (as seen by user)
- """
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- categories = yield self.store.get_group_categories(group_id=group_id)
- return {"categories": categories}
-
- @defer.inlineCallbacks
- def get_group_category(self, group_id, requester_user_id, category_id):
- """Get a specific category in a group (as seen by user)
- """
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- res = yield self.store.get_group_category(
- group_id=group_id, category_id=category_id
- )
-
- return res
-
- @defer.inlineCallbacks
def update_group_category(self, group_id, requester_user_id, category_id, content):
"""Add/Update a group category
"""
@@ -285,24 +452,6 @@ class GroupsServerHandler(object):
return {}
@defer.inlineCallbacks
- def get_group_roles(self, group_id, requester_user_id):
- """Get all roles in a group (as seen by user)
- """
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- roles = yield self.store.get_group_roles(group_id=group_id)
- return {"roles": roles}
-
- @defer.inlineCallbacks
- def get_group_role(self, group_id, requester_user_id, role_id):
- """Get a specific role in a group (as seen by user)
- """
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- res = yield self.store.get_group_role(group_id=group_id, role_id=role_id)
- return res
-
- @defer.inlineCallbacks
def update_group_role(self, group_id, requester_user_id, role_id, content):
"""Add/update a role in a group
"""
@@ -371,30 +520,6 @@ class GroupsServerHandler(object):
return {}
@defer.inlineCallbacks
- def get_group_profile(self, group_id, requester_user_id):
- """Get the group profile as seen by requester_user_id
- """
-
- yield self.check_group_is_ours(group_id, requester_user_id)
-
- group = yield self.store.get_group(group_id)
-
- if group:
- cols = [
- "name",
- "short_description",
- "long_description",
- "avatar_url",
- "is_public",
- ]
- group_description = {key: group[key] for key in cols}
- group_description["is_openly_joinable"] = group["join_policy"] == "open"
-
- return group_description
- else:
- raise SynapseError(404, "Unknown group")
-
- @defer.inlineCallbacks
def update_group_profile(self, group_id, requester_user_id, content):
"""Update the group profile
"""
@@ -413,124 +538,6 @@ class GroupsServerHandler(object):
yield self.store.update_group_profile(group_id, profile)
@defer.inlineCallbacks
- def get_users_in_group(self, group_id, requester_user_id):
- """Get the users in group as seen by requester_user_id.
-
- The ordering is arbitrary at the moment
- """
-
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- is_user_in_group = yield self.store.is_user_in_group(
- requester_user_id, group_id
- )
-
- user_results = yield self.store.get_users_in_group(
- group_id, include_private=is_user_in_group
- )
-
- chunk = []
- for user_result in user_results:
- g_user_id = user_result["user_id"]
- is_public = user_result["is_public"]
- is_privileged = user_result["is_admin"]
-
- entry = {"user_id": g_user_id}
-
- profile = yield self.profile_handler.get_profile_from_cache(g_user_id)
- entry.update(profile)
-
- entry["is_public"] = bool(is_public)
- entry["is_privileged"] = bool(is_privileged)
-
- if not self.is_mine_id(g_user_id):
- attestation = yield self.store.get_remote_attestation(
- group_id, g_user_id
- )
- if not attestation:
- continue
-
- entry["attestation"] = attestation
- else:
- entry["attestation"] = self.attestations.create_attestation(
- group_id, g_user_id
- )
-
- chunk.append(entry)
-
- # TODO: If admin add lists of users whose attestations have timed out
-
- return {"chunk": chunk, "total_user_count_estimate": len(user_results)}
-
- @defer.inlineCallbacks
- def get_invited_users_in_group(self, group_id, requester_user_id):
- """Get the users that have been invited to a group as seen by requester_user_id.
-
- The ordering is arbitrary at the moment
- """
-
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- is_user_in_group = yield self.store.is_user_in_group(
- requester_user_id, group_id
- )
-
- if not is_user_in_group:
- raise SynapseError(403, "User not in group")
-
- invited_users = yield self.store.get_invited_users_in_group(group_id)
-
- user_profiles = []
-
- for user_id in invited_users:
- user_profile = {"user_id": user_id}
- try:
- profile = yield self.profile_handler.get_profile_from_cache(user_id)
- user_profile.update(profile)
- except Exception as e:
- logger.warning("Error getting profile for %s: %s", user_id, e)
- user_profiles.append(user_profile)
-
- return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
-
- @defer.inlineCallbacks
- def get_rooms_in_group(self, group_id, requester_user_id):
- """Get the rooms in group as seen by requester_user_id
-
- This returns rooms in order of decreasing number of joined users
- """
-
- yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True)
-
- is_user_in_group = yield self.store.is_user_in_group(
- requester_user_id, group_id
- )
-
- room_results = yield self.store.get_rooms_in_group(
- group_id, include_private=is_user_in_group
- )
-
- chunk = []
- for room_result in room_results:
- room_id = room_result["room_id"]
-
- joined_users = yield self.store.get_users_in_room(room_id)
- entry = yield self.room_list_handler.generate_room_entry(
- room_id, len(joined_users), with_alias=False, allow_private=True
- )
-
- if not entry:
- continue
-
- entry["is_public"] = bool(room_result["is_public"])
-
- chunk.append(entry)
-
- chunk.sort(key=lambda e: -e["num_joined_members"])
-
- return {"chunk": chunk, "total_room_count_estimate": len(room_results)}
-
- @defer.inlineCallbacks
def add_room_to_group(self, group_id, requester_user_id, room_id, content):
"""Add room to group
"""
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index 829f52eca1..6c46c995d2 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -20,6 +20,8 @@ from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import List
+from twisted.internet import defer
+
from synapse.api.errors import StoreError
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
@@ -43,6 +45,8 @@ class AccountValidityHandler(object):
self.clock = self.hs.get_clock()
self._account_validity = self.hs.config.account_validity
+ self._show_users_in_user_directory = self.hs.config.show_users_in_user_directory
+ self.profile_handler = self.hs.get_profile_handler()
if self._account_validity.renew_by_email_enabled and load_jinja2_templates:
# Don't do email-specific configuration if renewal by email is disabled.
@@ -82,6 +86,9 @@ class AccountValidityHandler(object):
self.clock.looping_call(send_emails, 30 * 60 * 1000)
+ # Check every hour to remove expired users from the user directory
+ self.clock.looping_call(self._mark_expired_users_as_inactive, 60 * 60 * 1000)
+
async def _send_renewal_emails(self):
"""Gets the list of users whose account is expiring in the amount of time
configured in the ``renew_at`` parameter from the ``account_validity``
@@ -262,4 +269,27 @@ class AccountValidityHandler(object):
user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent
)
+ # Check if renewed users should be reintroduced to the user directory
+ if self._show_users_in_user_directory:
+ # Show the user in the directory again by setting them to active
+ await self.profile_handler.set_active(
+ UserID.from_string(user_id), True, True
+ )
+
return expiration_ts
+
+ @defer.inlineCallbacks
+ def _mark_expired_users_as_inactive(self):
+ """Iterate over expired users. Mark them as inactive in order to hide them from the
+ user directory.
+
+ Returns:
+ Deferred
+ """
+ # Get expired users
+ expired_user_ids = yield self.store.get_expired_users()
+ expired_users = [UserID.from_string(user_id) for user_id in expired_user_ids]
+
+ # Mark each one as non-active
+ for user in expired_users:
+ yield self.profile_handler.set_active(user, False, True)
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 9205865231..f3c0aeceb6 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -58,8 +58,10 @@ class AdminHandler(BaseHandler):
ret = await self.store.get_user_by_id(user.to_string())
if ret:
profile = await self.store.get_profileinfo(user.localpart)
+ threepids = await self.store.user_get_threepids(user.to_string())
ret["displayname"] = profile.display_name
ret["avatar_url"] = profile.avatar_url
+ ret["threepids"] = threepids
return ret
async def export_user_data(self, user_id, writer):
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 54a71c49d2..48a88d3c2a 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -816,6 +816,14 @@ class AuthHandler(BaseHandler):
@defer.inlineCallbacks
def add_threepid(self, user_id, medium, address, validated_at):
+ # check if medium has a valid value
+ if medium not in ["email", "msisdn"]:
+ raise SynapseError(
+ code=400,
+ msg=("'%s' is not a valid value for 'medium'" % (medium,)),
+ errcode=Codes.INVALID_PARAM,
+ )
+
# 'Canonicalise' email addresses down to lower case.
# We've now moving towards the homeserver being the entity that
# is responsible for validating threepids used for resetting passwords
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 2afb390a92..f624c2a3f9 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -33,6 +33,7 @@ class DeactivateAccountHandler(BaseHandler):
self._device_handler = hs.get_device_handler()
self._room_member_handler = hs.get_room_member_handler()
self._identity_handler = hs.get_handlers().identity_handler
+ self._profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
# Flag that indicates whether the process to part users from rooms is running
@@ -104,6 +105,9 @@ class DeactivateAccountHandler(BaseHandler):
await self.store.user_set_password_hash(user_id, None)
+ user = UserID.from_string(user_id)
+ await self._profile_handler.set_active(user, False, False)
+
# Add the user to a table of users pending deactivation (ie.
# removal from all the rooms they're a member of)
await self.store.add_user_pending_deactivation(user_id)
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index a9bd431486..50cea3f378 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -26,6 +26,7 @@ from synapse.api.errors import (
FederationDeniedError,
HttpResponseException,
RequestSendFailed,
+ SynapseError,
)
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.types import RoomStreamToken, get_domain_from_id
@@ -39,6 +40,8 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
+MAX_DEVICE_DISPLAY_NAME_LEN = 100
+
class DeviceWorkerHandler(BaseHandler):
def __init__(self, hs):
@@ -225,6 +228,22 @@ class DeviceWorkerHandler(BaseHandler):
return result
+ @defer.inlineCallbacks
+ def on_federation_query_user_devices(self, user_id):
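+ """Handle an incoming federation query for a user's devices.
+
+ Returns a dict containing the user's device list and current
+ device-list stream_id, plus their master and self-signing
+ cross-signing keys where known.
+ """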
+ stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
+ master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
+ self_signing_key = yield self.store.get_e2e_cross_signing_key(
+ user_id, "self_signing"
+ )
+
+ return {
+ "user_id": user_id,
+ "stream_id": stream_id,
+ "devices": devices,
+ "master_key": master_key,
+ "self_signing_key": self_signing_key,
+ }
+
class DeviceHandler(DeviceWorkerHandler):
def __init__(self, hs):
@@ -239,9 +258,6 @@ class DeviceHandler(DeviceWorkerHandler):
federation_registry.register_edu_handler(
"m.device_list_update", self.device_list_updater.incoming_device_list_update
)
- federation_registry.register_query_handler(
- "user_devices", self.on_federation_query_user_devices
- )
hs.get_distributor().observe("user_left_room", self.user_left_room)
@@ -391,9 +407,18 @@ class DeviceHandler(DeviceWorkerHandler):
defer.Deferred:
"""
+ # Reject a new displayname which is too long.
+ new_display_name = content.get("display_name")
+ if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN:
+ raise SynapseError(
+ 400,
+ "Device display name is too long (max %i)"
+ % (MAX_DEVICE_DISPLAY_NAME_LEN,),
+ )
+
try:
yield self.store.update_device(
- user_id, device_id, new_display_name=content.get("display_name")
+ user_id, device_id, new_display_name=new_display_name
)
yield self.notify_device_update(user_id, [device_id])
except errors.StoreError as e:
@@ -457,22 +482,6 @@ class DeviceHandler(DeviceWorkerHandler):
self.notifier.on_new_event("device_list_key", position, users=[from_user_id])
@defer.inlineCallbacks
- def on_federation_query_user_devices(self, user_id):
- stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
- master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
- self_signing_key = yield self.store.get_e2e_cross_signing_key(
- user_id, "self_signing"
- )
-
- return {
- "user_id": user_id,
- "stream_id": stream_id,
- "devices": devices,
- "master_key": master_key,
- "self_signing_key": self_signing_key,
- }
-
- @defer.inlineCallbacks
def user_left_room(self, user, room_id):
user_id = user.to_string()
room_ids = yield self.store.get_rooms_for_user(user_id)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index e9441bbeff..1ec61340ad 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -65,7 +65,7 @@ from synapse.replication.http.federation import (
from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
from synapse.state import StateResolutionStore, resolve_events_with_store
from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
-from synapse.types import StateMap, UserID, get_domain_from_id
+from synapse.types import JsonDict, StateMap, UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.distributor import user_joined_room
from synapse.util.retryutils import NotRetryingDestination
@@ -187,7 +187,7 @@ class FederationHandler(BaseHandler):
room_id = pdu.room_id
event_id = pdu.event_id
- logger.info("handling received PDU: %s", pdu)
+ logger.info("[%s %s] handling received PDU: %s", room_id, event_id, pdu)
# We reprocess pdus when we have seen them only as outliers
existing = await self.store.get_event(
@@ -302,6 +302,14 @@ class FederationHandler(BaseHandler):
room_id,
event_id,
)
+ elif missing_prevs:
+ logger.info(
+ "[%s %s] Not recursively fetching %d missing prev_events: %s",
+ room_id,
+ event_id,
+ len(missing_prevs),
+ shortstr(missing_prevs),
+ )
if prevs - seen:
# We've still not been able to get all of the prev_events for this event.
@@ -346,12 +354,6 @@ class FederationHandler(BaseHandler):
affected=pdu.event_id,
)
- logger.info(
- "Event %s is missing prev_events: calculating state for a "
- "backwards extremity",
- event_id,
- )
-
# Calculate the state after each of the previous events, and
# resolve them to find the correct state at the current event.
event_map = {event_id: pdu}
@@ -369,7 +371,10 @@ class FederationHandler(BaseHandler):
# know about
for p in prevs - seen:
logger.info(
- "Requesting state at missing prev_event %s", event_id,
+ "[%s %s] Requesting state at missing prev_event %s",
+ room_id,
+ event_id,
+ p,
)
with nested_logging_context(p):
@@ -405,7 +410,6 @@ class FederationHandler(BaseHandler):
evs = await self.store.get_events(
list(state_map.values()),
get_prev_content=False,
- redact_behaviour=EventRedactBehaviour.AS_IS,
)
event_map.update(evs)
@@ -1156,7 +1160,7 @@ class FederationHandler(BaseHandler):
Logs a warning if we can't find the given event.
"""
- room_version = await self.store.get_room_version_id(room_id)
+ room_version = await self.store.get_room_version(room_id)
event_infos = []
@@ -1230,13 +1234,12 @@ class FederationHandler(BaseHandler):
)
raise SynapseError(http_client.BAD_REQUEST, "Too many auth_events")
- @defer.inlineCallbacks
- def send_invite(self, target_host, event):
+ async def send_invite(self, target_host, event):
""" Sends the invite to the remote server for signing.
Invites must be signed by the invitee's server before distribution.
"""
- pdu = yield self.federation_client.send_invite(
+ pdu = await self.federation_client.send_invite(
destination=target_host,
room_id=event.room_id,
event_id=event.event_id,
@@ -1245,17 +1248,16 @@ class FederationHandler(BaseHandler):
return pdu
- @defer.inlineCallbacks
- def on_event_auth(self, event_id):
- event = yield self.store.get_event(event_id)
- auth = yield self.store.get_auth_chain(
+ async def on_event_auth(self, event_id: str) -> List[EventBase]:
+ event = await self.store.get_event(event_id)
+ auth = await self.store.get_auth_chain(
[auth_id for auth_id in event.auth_event_ids()], include_given=True
)
- return [e for e in auth]
+ return list(auth)
- @log_function
- @defer.inlineCallbacks
- def do_invite_join(self, target_hosts, room_id, joinee, content):
+ async def do_invite_join(
+ self, target_hosts: Iterable[str], room_id: str, joinee: str, content: JsonDict
+ ) -> None:
""" Attempts to join the `joinee` to the room `room_id` via the
servers contained in `target_hosts`.
@@ -1268,17 +1270,17 @@ class FederationHandler(BaseHandler):
have finished processing the join.
Args:
- target_hosts (Iterable[str]): List of servers to attempt to join the room with.
+ target_hosts: List of servers to attempt to join the room with.
- room_id (str): The ID of the room to join.
+ room_id: The ID of the room to join.
- joinee (str): The User ID of the joining user.
+ joinee: The User ID of the joining user.
- content (dict): The event content to use for the join event.
+ content: The event content to use for the join event.
"""
logger.debug("Joining %s to %s", joinee, room_id)
- origin, event, room_version_obj = yield self._make_and_verify_event(
+ origin, event, room_version_obj = await self._make_and_verify_event(
target_hosts,
room_id,
joinee,
@@ -1294,7 +1296,7 @@ class FederationHandler(BaseHandler):
self.room_queues[room_id] = []
- yield self._clean_room_for_join(room_id)
+ await self._clean_room_for_join(room_id)
handled_events = set()
@@ -1307,9 +1309,8 @@ class FederationHandler(BaseHandler):
except ValueError:
pass
- event_format_version = room_version_obj.event_format
- ret = yield self.federation_client.send_join(
- target_hosts, event, event_format_version
+ ret = await self.federation_client.send_join(
+ target_hosts, event, room_version_obj
)
origin = ret["origin"]
@@ -1327,7 +1328,7 @@ class FederationHandler(BaseHandler):
logger.debug("do_invite_join event: %s", event)
try:
- yield self.store.store_room(
+ await self.store.store_room(
room_id=room_id,
room_creator_user_id="",
is_public=False,
@@ -1337,13 +1338,13 @@ class FederationHandler(BaseHandler):
# FIXME
pass
- yield self._persist_auth_tree(
+ await self._persist_auth_tree(
origin, auth_chain, state, event, room_version_obj
)
# Check whether this room is the result of an upgrade of a room we already know
# about. If so, migrate over user information
- predecessor = yield self.store.get_room_predecessor(room_id)
+ predecessor = await self.store.get_room_predecessor(room_id)
if not predecessor or not isinstance(predecessor.get("room_id"), str):
return
old_room_id = predecessor["room_id"]
@@ -1353,7 +1354,7 @@ class FederationHandler(BaseHandler):
# We retrieve the room member handler here as to not cause a cyclic dependency
member_handler = self.hs.get_room_member_handler()
- yield member_handler.transfer_room_state_on_room_upgrade(
+ await member_handler.transfer_room_state_on_room_upgrade(
old_room_id, room_id
)
@@ -1370,8 +1371,6 @@ class FederationHandler(BaseHandler):
run_in_background(self._handle_queued_pdus, room_queue)
- return True
-
async def _handle_queued_pdus(self, room_queue):
"""Process PDUs which got queued up while we were busy send_joining.
@@ -1394,20 +1393,17 @@ class FederationHandler(BaseHandler):
"Error handling queued PDU %s from %s: %s", p.event_id, origin, e
)
- @defer.inlineCallbacks
- @log_function
- def on_make_join_request(self, origin, room_id, user_id):
+ async def on_make_join_request(
+ self, origin: str, room_id: str, user_id: str
+ ) -> EventBase:
""" We've received a /make_join/ request, so we create a partial
join event for the room and return that. We do *not* persist or
process it until the other server has signed it and sent it back.
Args:
- origin (str): The (verified) server name of the requesting server.
- room_id (str): Room to create join event in
- user_id (str): The user to create the join for
-
- Returns:
- Deferred[FrozenEvent]
+ origin: The (verified) server name of the requesting server.
+ room_id: Room to create join event in
+ user_id: The user to create the join for
"""
if get_domain_from_id(user_id) != origin:
logger.info(
@@ -1419,7 +1415,7 @@ class FederationHandler(BaseHandler):
event_content = {"membership": Membership.JOIN}
- room_version = yield self.store.get_room_version_id(room_id)
+ room_version = await self.store.get_room_version_id(room_id)
builder = self.event_builder_factory.new(
room_version,
@@ -1433,14 +1429,14 @@ class FederationHandler(BaseHandler):
)
try:
- event, context = yield self.event_creation_handler.create_new_client_event(
+ event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
except AuthError as e:
logger.warning("Failed to create join to %s because %s", room_id, e)
raise e
- event_allowed = yield self.third_party_event_rules.check_event_allowed(
+ event_allowed = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@@ -1451,15 +1447,13 @@ class FederationHandler(BaseHandler):
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
- yield self.auth.check_from_context(
+ await self.auth.check_from_context(
room_version, event, context, do_sig_check=False
)
return event
- @defer.inlineCallbacks
- @log_function
- def on_send_join_request(self, origin, pdu):
+ async def on_send_join_request(self, origin, pdu):
""" We have received a join event for a room. Fully process it and
respond with the current state and auth chains.
"""
@@ -1496,9 +1490,9 @@ class FederationHandler(BaseHandler):
# would introduce the danger of backwards-compatibility problems.
event.internal_metadata.send_on_behalf_of = origin
- context = yield self._handle_new_event(origin, event)
+ context = await self._handle_new_event(origin, event)
- event_allowed = yield self.third_party_event_rules.check_event_allowed(
+ event_allowed = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@@ -1516,19 +1510,18 @@ class FederationHandler(BaseHandler):
if event.type == EventTypes.Member:
if event.content["membership"] == Membership.JOIN:
user = UserID.from_string(event.state_key)
- yield self.user_joined_room(user, event.room_id)
+ await self.user_joined_room(user, event.room_id)
- prev_state_ids = yield context.get_prev_state_ids()
+ prev_state_ids = await context.get_prev_state_ids()
state_ids = list(prev_state_ids.values())
- auth_chain = yield self.store.get_auth_chain(state_ids)
+ auth_chain = await self.store.get_auth_chain(state_ids)
- state = yield self.store.get_events(list(prev_state_ids.values()))
+ state = await self.store.get_events(list(prev_state_ids.values()))
return {"state": list(state.values()), "auth_chain": auth_chain}
- @defer.inlineCallbacks
- def on_invite_request(
+ async def on_invite_request(
self, origin: str, event: EventBase, room_version: RoomVersion
):
""" We've got an invite event. Process and persist it. Sign it.
@@ -1538,15 +1531,22 @@ class FederationHandler(BaseHandler):
if event.state_key is None:
raise SynapseError(400, "The invite event did not have a state key")
- is_blocked = yield self.store.is_room_blocked(event.room_id)
+ is_blocked = await self.store.is_room_blocked(event.room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
if self.hs.config.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
+ is_published = await self.store.is_room_published(event.room_id)
+
if not self.spam_checker.user_may_invite(
- event.sender, event.state_key, event.room_id
+ event.sender,
+ event.state_key,
+ None,
+ room_id=event.room_id,
+ new_room=False,
+ published_room=is_published,
):
raise SynapseError(
403, "This user is not permitted to send invites to this server/user"
@@ -1581,14 +1581,15 @@ class FederationHandler(BaseHandler):
)
)
- context = yield self.state_handler.compute_event_context(event)
- yield self.persist_events_and_notify([(event, context)])
+ context = await self.state_handler.compute_event_context(event)
+ await self.persist_events_and_notify([(event, context)])
return event
- @defer.inlineCallbacks
- def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content):
- origin, event, room_version = yield self._make_and_verify_event(
+ async def do_remotely_reject_invite(
+ self, target_hosts: Iterable[str], room_id: str, user_id: str, content: JsonDict
+ ) -> EventBase:
+ origin, event, room_version = await self._make_and_verify_event(
target_hosts, room_id, user_id, "leave", content=content
)
# Mark as outlier as we don't have any state for this event; we're not
@@ -1604,22 +1605,27 @@ class FederationHandler(BaseHandler):
except ValueError:
pass
- yield self.federation_client.send_leave(target_hosts, event)
+ await self.federation_client.send_leave(target_hosts, event)
- context = yield self.state_handler.compute_event_context(event)
- yield self.persist_events_and_notify([(event, context)])
+ context = await self.state_handler.compute_event_context(event)
+ await self.persist_events_and_notify([(event, context)])
return event
- @defer.inlineCallbacks
- def _make_and_verify_event(
- self, target_hosts, room_id, user_id, membership, content={}, params=None
- ):
+ async def _make_and_verify_event(
+ self,
+ target_hosts: Iterable[str],
+ room_id: str,
+ user_id: str,
+ membership: str,
+ content: JsonDict = {},
+ params: Optional[Dict[str, str]] = None,
+ ) -> Tuple[str, EventBase, RoomVersion]:
(
origin,
event,
room_version,
- ) = yield self.federation_client.make_membership_event(
+ ) = await self.federation_client.make_membership_event(
target_hosts, room_id, user_id, membership, content, params=params
)
@@ -1633,20 +1639,17 @@ class FederationHandler(BaseHandler):
assert event.room_id == room_id
return origin, event, room_version
- @defer.inlineCallbacks
- @log_function
- def on_make_leave_request(self, origin, room_id, user_id):
+ async def on_make_leave_request(
+ self, origin: str, room_id: str, user_id: str
+ ) -> EventBase:
""" We've received a /make_leave/ request, so we create a partial
leave event for the room and return that. We do *not* persist or
process it until the other server has signed it and sent it back.
Args:
- origin (str): The (verified) server name of the requesting server.
- room_id (str): Room to create leave event in
- user_id (str): The user to create the leave for
-
- Returns:
- Deferred[FrozenEvent]
+ origin: The (verified) server name of the requesting server.
+ room_id: Room to create leave event in
+ user_id: The user to create the leave for
"""
if get_domain_from_id(user_id) != origin:
logger.info(
@@ -1656,7 +1659,7 @@ class FederationHandler(BaseHandler):
)
raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
- room_version = yield self.store.get_room_version_id(room_id)
+ room_version = await self.store.get_room_version_id(room_id)
builder = self.event_builder_factory.new(
room_version,
{
@@ -1668,11 +1671,11 @@ class FederationHandler(BaseHandler):
},
)
- event, context = yield self.event_creation_handler.create_new_client_event(
+ event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
- event_allowed = yield self.third_party_event_rules.check_event_allowed(
+ event_allowed = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@@ -1684,7 +1687,7 @@ class FederationHandler(BaseHandler):
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
- yield self.auth.check_from_context(
+ await self.auth.check_from_context(
room_version, event, context, do_sig_check=False
)
except AuthError as e:
@@ -1693,9 +1696,7 @@ class FederationHandler(BaseHandler):
return event
- @defer.inlineCallbacks
- @log_function
- def on_send_leave_request(self, origin, pdu):
+ async def on_send_leave_request(self, origin, pdu):
""" We have received a leave event for a room. Fully process it."""
event = pdu
@@ -1715,9 +1716,9 @@ class FederationHandler(BaseHandler):
event.internal_metadata.outlier = False
- context = yield self._handle_new_event(origin, event)
+ context = await self._handle_new_event(origin, event)
- event_allowed = yield self.third_party_event_rules.check_event_allowed(
+ event_allowed = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@@ -1798,6 +1799,9 @@ class FederationHandler(BaseHandler):
if not in_room:
raise AuthError(403, "Host not in room.")
+ # Synapse asks for 100 events per backfill request. Do not allow more.
+ limit = min(limit, 100)
+
events = yield self.store.get_backfill_events(room_id, pdu_list, limit)
events = yield filter_events_for_server(self.storage, origin, events)
@@ -1839,11 +1843,10 @@ class FederationHandler(BaseHandler):
def get_min_depth_for_context(self, context):
return self.store.get_min_depth(context)
- @defer.inlineCallbacks
- def _handle_new_event(
+ async def _handle_new_event(
self, origin, event, state=None, auth_events=None, backfilled=False
):
- context = yield self._prep_event(
+ context = await self._prep_event(
origin, event, state=state, auth_events=auth_events, backfilled=backfilled
)
@@ -1856,11 +1859,11 @@ class FederationHandler(BaseHandler):
and not backfilled
and not context.rejected
):
- yield self.action_generator.handle_push_actions_for_event(
+ await self.action_generator.handle_push_actions_for_event(
event, context
)
- yield self.persist_events_and_notify(
+ await self.persist_events_and_notify(
[(event, context)], backfilled=backfilled
)
success = True
@@ -1872,13 +1875,12 @@ class FederationHandler(BaseHandler):
return context
- @defer.inlineCallbacks
- def _handle_new_events(
+ async def _handle_new_events(
self,
origin: str,
event_infos: Iterable[_NewEventInfo],
backfilled: bool = False,
- ):
+ ) -> None:
"""Creates the appropriate contexts and persists events. The events
should not depend on one another, e.g. this should be used to persist
a bunch of outliers, but not a chunk of individual events that depend
@@ -1887,11 +1889,10 @@ class FederationHandler(BaseHandler):
Notifies about the events where appropriate.
"""
- @defer.inlineCallbacks
- def prep(ev_info: _NewEventInfo):
+ async def prep(ev_info: _NewEventInfo):
event = ev_info.event
with nested_logging_context(suffix=event.event_id):
- res = yield self._prep_event(
+ res = await self._prep_event(
origin,
event,
state=ev_info.state,
@@ -1900,14 +1901,14 @@ class FederationHandler(BaseHandler):
)
return res
- contexts = yield make_deferred_yieldable(
+ contexts = await make_deferred_yieldable(
defer.gatherResults(
[run_in_background(prep, ev_info) for ev_info in event_infos],
consumeErrors=True,
)
)
- yield self.persist_events_and_notify(
+ await self.persist_events_and_notify(
[
(ev_info.event, context)
for ev_info, context in zip(event_infos, contexts)
@@ -1915,15 +1916,14 @@ class FederationHandler(BaseHandler):
backfilled=backfilled,
)
- @defer.inlineCallbacks
- def _persist_auth_tree(
+ async def _persist_auth_tree(
self,
origin: str,
auth_events: List[EventBase],
state: List[EventBase],
event: EventBase,
room_version: RoomVersion,
- ):
+ ) -> None:
"""Checks the auth chain is valid (and passes auth checks) for the
state and event. Then persists the auth chain and state atomically.
Persists the event separately. Notifies about the persisted events
@@ -1938,14 +1938,11 @@ class FederationHandler(BaseHandler):
event
room_version: The room version we expect this room to have, and
will raise if it doesn't match the version in the create event.
-
- Returns:
- Deferred
"""
events_to_context = {}
for e in itertools.chain(auth_events, state):
e.internal_metadata.outlier = True
- ctx = yield self.state_handler.compute_event_context(e)
+ ctx = await self.state_handler.compute_event_context(e)
events_to_context[e.event_id] = ctx
event_map = {
@@ -1977,12 +1974,8 @@ class FederationHandler(BaseHandler):
missing_auth_events.add(e_id)
for e_id in missing_auth_events:
- m_ev = yield self.federation_client.get_pdu(
- [origin],
- e_id,
- room_version=room_version.identifier,
- outlier=True,
- timeout=10000,
+ m_ev = await self.federation_client.get_pdu(
+ [origin], e_id, room_version=room_version, outlier=True, timeout=10000,
)
if m_ev and m_ev.event_id == e_id:
event_map[e_id] = m_ev
@@ -2013,91 +2006,74 @@ class FederationHandler(BaseHandler):
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
- yield self.persist_events_and_notify(
+ await self.persist_events_and_notify(
[
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
]
)
- new_event_context = yield self.state_handler.compute_event_context(
+ new_event_context = await self.state_handler.compute_event_context(
event, old_state=state
)
- yield self.persist_events_and_notify([(event, new_event_context)])
+ await self.persist_events_and_notify([(event, new_event_context)])
- @defer.inlineCallbacks
- def _prep_event(
+ async def _prep_event(
self,
origin: str,
event: EventBase,
state: Optional[Iterable[EventBase]],
auth_events: Optional[StateMap[EventBase]],
backfilled: bool,
- ):
- """
-
- Args:
- origin:
- event:
- state:
- auth_events:
- backfilled:
-
- Returns:
- Deferred, which resolves to synapse.events.snapshot.EventContext
- """
- context = yield self.state_handler.compute_event_context(event, old_state=state)
+ ) -> EventContext:
+ context = await self.state_handler.compute_event_context(event, old_state=state)
if not auth_events:
- prev_state_ids = yield context.get_prev_state_ids()
- auth_events_ids = yield self.auth.compute_auth_events(
+ prev_state_ids = await context.get_prev_state_ids()
+ auth_events_ids = await self.auth.compute_auth_events(
event, prev_state_ids, for_verification=True
)
- auth_events = yield self.store.get_events(auth_events_ids)
+ auth_events = await self.store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
# This is a hack to fix some old rooms where the initial join event
# didn't reference the create event in its auth events.
if event.type == EventTypes.Member and not event.auth_event_ids():
if len(event.prev_event_ids()) == 1 and event.depth < 5:
- c = yield self.store.get_event(
+ c = await self.store.get_event(
event.prev_event_ids()[0], allow_none=True
)
if c and c.type == EventTypes.Create:
auth_events[(c.type, c.state_key)] = c
- context = yield self.do_auth(origin, event, context, auth_events=auth_events)
+ context = await self.do_auth(origin, event, context, auth_events=auth_events)
if not context.rejected:
- yield self._check_for_soft_fail(event, state, backfilled)
+ await self._check_for_soft_fail(event, state, backfilled)
if event.type == EventTypes.GuestAccess and not context.rejected:
- yield self.maybe_kick_guest_users(event)
+ await self.maybe_kick_guest_users(event)
return context
- @defer.inlineCallbacks
- def _check_for_soft_fail(
+ async def _check_for_soft_fail(
self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool
- ):
- """Checks if we should soft fail the event, if so marks the event as
+ ) -> None:
+ """Checks if we should soft fail the event; if so, marks the event as
such.
Args:
event
state: The state at the event if we don't have all the event's prev events
backfilled: Whether the event is from backfill
-
- Returns:
- Deferred
"""
# For new (non-backfilled and non-outlier) events we check if the event
# passes auth based on the current state. If it doesn't then we
# "soft-fail" the event.
do_soft_fail_check = not backfilled and not event.internal_metadata.is_outlier()
if do_soft_fail_check:
- extrem_ids = yield self.store.get_latest_event_ids_in_room(event.room_id)
+ extrem_ids = await self.store.get_latest_event_ids_in_room(event.room_id)
extrem_ids = set(extrem_ids)
prev_event_ids = set(event.prev_event_ids())
@@ -2108,7 +2084,7 @@ class FederationHandler(BaseHandler):
do_soft_fail_check = False
if do_soft_fail_check:
- room_version = yield self.store.get_room_version_id(event.room_id)
+ room_version = await self.store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
# Calculate the "current state".
@@ -2125,19 +2101,19 @@ class FederationHandler(BaseHandler):
# given state at the event. This should correctly handle cases
# like bans, especially with state res v2.
- state_sets = yield self.state_store.get_state_groups(
+ state_sets = await self.state_store.get_state_groups(
event.room_id, extrem_ids
)
state_sets = list(state_sets.values())
state_sets.append(state)
- current_state_ids = yield self.state_handler.resolve_events(
+ current_state_ids = await self.state_handler.resolve_events(
room_version, state_sets, event
)
current_state_ids = {
k: e.event_id for k, e in iteritems(current_state_ids)
}
else:
- current_state_ids = yield self.state_handler.get_current_state_ids(
+ current_state_ids = await self.state_handler.get_current_state_ids(
event.room_id, latest_event_ids=extrem_ids
)
@@ -2153,7 +2129,7 @@ class FederationHandler(BaseHandler):
e for k, e in iteritems(current_state_ids) if k in auth_types
]
- current_auth_events = yield self.store.get_events(current_state_ids)
+ current_auth_events = await self.store.get_events(current_state_ids)
current_auth_events = {
(e.type, e.state_key): e for e in current_auth_events.values()
}
@@ -2166,15 +2142,14 @@ class FederationHandler(BaseHandler):
logger.warning("Soft-failing %r because %s", event, e)
event.internal_metadata.soft_failed = True
- @defer.inlineCallbacks
- def on_query_auth(
+ async def on_query_auth(
self, origin, event_id, room_id, remote_auth_chain, rejects, missing
):
- in_room = yield self.auth.check_host_in_room(room_id, origin)
+ in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
- event = yield self.store.get_event(
+ event = await self.store.get_event(
event_id, allow_none=False, check_room_id=room_id
)
@@ -2182,57 +2157,61 @@ class FederationHandler(BaseHandler):
# don't want to fall into the trap of `missing` being wrong.
for e in remote_auth_chain:
try:
- yield self._handle_new_event(origin, e)
+ await self._handle_new_event(origin, e)
except AuthError:
pass
# Now get the current auth_chain for the event.
- local_auth_chain = yield self.store.get_auth_chain(
+ local_auth_chain = await self.store.get_auth_chain(
[auth_id for auth_id in event.auth_event_ids()], include_given=True
)
# TODO: Check if we would now reject event_id. If so we need to tell
# everyone.
- ret = yield self.construct_auth_difference(local_auth_chain, remote_auth_chain)
+ ret = await self.construct_auth_difference(local_auth_chain, remote_auth_chain)
logger.debug("on_query_auth returning: %s", ret)
return ret
- @defer.inlineCallbacks
- def on_get_missing_events(
+ async def on_get_missing_events(
self, origin, room_id, earliest_events, latest_events, limit
):
- in_room = yield self.auth.check_host_in_room(room_id, origin)
+ in_room = await self.auth.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
+ # Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20)
- missing_events = yield self.store.get_missing_events(
+ missing_events = await self.store.get_missing_events(
room_id=room_id,
earliest_events=earliest_events,
latest_events=latest_events,
limit=limit,
)
- missing_events = yield filter_events_for_server(
+ missing_events = await filter_events_for_server(
self.storage, origin, missing_events
)
return missing_events
- @defer.inlineCallbacks
- @log_function
- def do_auth(self, origin, event, context, auth_events):
+ async def do_auth(
+ self,
+ origin: str,
+ event: EventBase,
+ context: EventContext,
+ auth_events: StateMap[EventBase],
+ ) -> EventContext:
"""
Args:
- origin (str):
- event (synapse.events.EventBase):
- context (synapse.events.snapshot.EventContext):
- auth_events (dict[(str, str)->synapse.events.EventBase]):
+ origin:
+ event:
+ context:
+ auth_events:
Map from (event_type, state_key) to event
Normally, our calculated auth_events based on the state of the room
@@ -2242,13 +2221,13 @@ class FederationHandler(BaseHandler):
Also NB that this function adds entries to it.
Returns:
- defer.Deferred[EventContext]: updated context object
+ updated context object
"""
- room_version = yield self.store.get_room_version_id(event.room_id)
+ room_version = await self.store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
try:
- context = yield self._update_auth_events_and_context_for_auth(
+ context = await self._update_auth_events_and_context_for_auth(
origin, event, context, auth_events
)
except Exception:
@@ -2270,10 +2249,13 @@ class FederationHandler(BaseHandler):
return context
- @defer.inlineCallbacks
- def _update_auth_events_and_context_for_auth(
- self, origin, event, context, auth_events
- ):
+ async def _update_auth_events_and_context_for_auth(
+ self,
+ origin: str,
+ event: EventBase,
+ context: EventContext,
+ auth_events: StateMap[EventBase],
+ ) -> EventContext:
"""Helper for do_auth. See there for docs.
Checks whether a given event has the expected auth events. If it
@@ -2281,16 +2263,16 @@ class FederationHandler(BaseHandler):
we can come to a consensus (e.g. if one server missed some valid
state).
- This attempts to resovle any potential divergence of state between
+ This attempts to resolve any potential divergence of state between
servers, but is not essential and so failures should not block further
processing of the event.
Args:
- origin (str):
- event (synapse.events.EventBase):
- context (synapse.events.snapshot.EventContext):
+ origin:
+ event:
+ context:
- auth_events (dict[(str, str)->synapse.events.EventBase]):
+ auth_events:
Map from (event_type, state_key) to event
Normally, our calculated auth_events based on the state of the room
@@ -2301,7 +2283,7 @@ class FederationHandler(BaseHandler):
Also NB that this function adds entries to it.
Returns:
- defer.Deferred[EventContext]: updated context
+ updated context
"""
event_auth_events = set(event.auth_event_ids())
@@ -2315,7 +2297,7 @@ class FederationHandler(BaseHandler):
#
# we start by checking if they are in the store, and then try calling /event_auth/.
if missing_auth:
- have_events = yield self.store.have_seen_events(missing_auth)
+ have_events = await self.store.have_seen_events(missing_auth)
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)
@@ -2324,7 +2306,7 @@ class FederationHandler(BaseHandler):
logger.info("auth_events contains unknown events: %s", missing_auth)
try:
try:
- remote_auth_chain = yield self.federation_client.get_event_auth(
+ remote_auth_chain = await self.federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
except RequestSendFailed as e:
@@ -2333,7 +2315,7 @@ class FederationHandler(BaseHandler):
logger.info("Failed to get event auth from remote: %s", e)
return context
- seen_remotes = yield self.store.have_seen_events(
+ seen_remotes = await self.store.have_seen_events(
[e.event_id for e in remote_auth_chain]
)
@@ -2356,7 +2338,7 @@ class FederationHandler(BaseHandler):
logger.debug(
"do_auth %s missing_auth: %s", event.event_id, e.event_id
)
- yield self._handle_new_event(origin, e, auth_events=auth)
+ await self._handle_new_event(origin, e, auth_events=auth)
if e.event_id in event_auth_events:
auth_events[(e.type, e.state_key)] = e
@@ -2390,7 +2372,7 @@ class FederationHandler(BaseHandler):
# XXX: currently this checks for redactions but I'm not convinced that is
# necessary?
- different_events = yield self.store.get_events_as_list(different_auth)
+ different_events = await self.store.get_events_as_list(different_auth)
for d in different_events:
if d.room_id != event.room_id:
@@ -2416,8 +2398,8 @@ class FederationHandler(BaseHandler):
remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
remote_state = remote_auth_events.values()
- room_version = yield self.store.get_room_version_id(event.room_id)
- new_state = yield self.state_handler.resolve_events(
+ room_version = await self.store.get_room_version_id(event.room_id)
+ new_state = await self.state_handler.resolve_events(
room_version, (local_state, remote_state), event
)
@@ -2432,27 +2414,27 @@ class FederationHandler(BaseHandler):
auth_events.update(new_state)
- context = yield self._update_context_for_auth_events(
+ context = await self._update_context_for_auth_events(
event, context, auth_events
)
return context
- @defer.inlineCallbacks
- def _update_context_for_auth_events(self, event, context, auth_events):
+ async def _update_context_for_auth_events(
+ self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
+ ) -> EventContext:
"""Update the state_ids in an event context after auth event resolution,
storing the changes as a new state group.
Args:
- event (Event): The event we're handling the context for
+ event: The event we're handling the context for
- context (synapse.events.snapshot.EventContext): initial event context
+ context: initial event context
- auth_events (dict[(str, str)->EventBase]): Events to update in the event
- context.
+ auth_events: Events to update in the event context.
Returns:
- Deferred[EventContext]: new event context
+ new event context
"""
# exclude the state key of the new event from the current_state in the context.
if event.is_state():
@@ -2463,19 +2445,19 @@ class FederationHandler(BaseHandler):
k: a.event_id for k, a in iteritems(auth_events) if k != event_key
}
- current_state_ids = yield context.get_current_state_ids()
+ current_state_ids = await context.get_current_state_ids()
current_state_ids = dict(current_state_ids)
current_state_ids.update(state_updates)
- prev_state_ids = yield context.get_prev_state_ids()
+ prev_state_ids = await context.get_prev_state_ids()
prev_state_ids = dict(prev_state_ids)
prev_state_ids.update({k: a.event_id for k, a in iteritems(auth_events)})
# create a new state group as a delta from the existing one.
prev_group = context.state_group
- state_group = yield self.state_store.store_state_group(
+ state_group = await self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=prev_group,
@@ -2492,8 +2474,9 @@ class FederationHandler(BaseHandler):
delta_ids=state_updates,
)
- @defer.inlineCallbacks
- def construct_auth_difference(self, local_auth, remote_auth):
+ async def construct_auth_difference(
+ self, local_auth: Iterable[EventBase], remote_auth: Iterable[EventBase]
+ ) -> Dict:
""" Given a local and remote auth chain, find the differences. This
assumes that we have already processed all events in remote_auth
@@ -2602,7 +2585,7 @@ class FederationHandler(BaseHandler):
reason_map = {}
for e in base_remote_rejected:
- reason = yield self.store.get_rejection_reason(e.event_id)
+ reason = await self.store.get_rejection_reason(e.event_id)
if reason is None:
# TODO: e is not in the current state, so we should
# construct some proof of that.
@@ -2687,33 +2670,31 @@ class FederationHandler(BaseHandler):
destinations, room_id, event_dict
)
- @defer.inlineCallbacks
- @log_function
- def on_exchange_third_party_invite_request(self, room_id, event_dict):
+ async def on_exchange_third_party_invite_request(
+ self, room_id: str, event_dict: JsonDict
+ ) -> None:
"""Handle an exchange_third_party_invite request from a remote server
The remote server will call this when it wants to turn a 3pid invite
into a normal m.room.member invite.
Args:
- room_id (str): The ID of the room.
+ room_id: The ID of the room.
- event_dict (dict[str, Any]): Dictionary containing the event body.
+ event_dict: Dictionary containing the event body.
- Returns:
- Deferred: resolves (to None)
"""
- room_version = yield self.store.get_room_version_id(room_id)
+ room_version = await self.store.get_room_version_id(room_id)
# NB: event_dict has a particular specced format we might need to fudge
# if we change event formats too much.
builder = self.event_builder_factory.new(room_version, event_dict)
- event, context = yield self.event_creation_handler.create_new_client_event(
+ event, context = await self.event_creation_handler.create_new_client_event(
builder=builder
)
- event_allowed = yield self.third_party_event_rules.check_event_allowed(
+ event_allowed = await self.third_party_event_rules.check_event_allowed(
event, context
)
if not event_allowed:
@@ -2724,16 +2705,16 @@ class FederationHandler(BaseHandler):
403, "This event is not allowed in this context", Codes.FORBIDDEN
)
- event, context = yield self.add_display_name_to_third_party_invite(
+ event, context = await self.add_display_name_to_third_party_invite(
room_version, event_dict, event, context
)
try:
- yield self.auth.check_from_context(room_version, event, context)
+ await self.auth.check_from_context(room_version, event, context)
except AuthError as e:
logger.warning("Denying third party invite %r because %s", event, e)
raise e
- yield self._check_signature(event, context)
+ await self._check_signature(event, context)
# We need to tell the transaction queue to send this out, even
# though the sender isn't a local user.
@@ -2741,7 +2722,7 @@ class FederationHandler(BaseHandler):
# We retrieve the room member handler here as to not cause a cyclic dependency
member_handler = self.hs.get_room_member_handler()
- yield member_handler.send_membership_event(None, event, context)
+ await member_handler.send_membership_event(None, event, context)
@defer.inlineCallbacks
def add_display_name_to_third_party_invite(
@@ -2889,27 +2870,27 @@ class FederationHandler(BaseHandler):
if "valid" not in response or not response["valid"]:
raise AuthError(403, "Third party certificate was invalid")
- @defer.inlineCallbacks
- def persist_events_and_notify(self, event_and_contexts, backfilled=False):
+ async def persist_events_and_notify(
+ self,
+ event_and_contexts: Sequence[Tuple[EventBase, EventContext]],
+ backfilled: bool = False,
+ ) -> None:
"""Persists events and tells the notifier/pushers about them, if
necessary.
Args:
- event_and_contexts(list[tuple[FrozenEvent, EventContext]])
- backfilled (bool): Whether these events are a result of
+ event_and_contexts:
+ backfilled: Whether these events are a result of
backfilling or not
-
- Returns:
- Deferred
"""
if self.config.worker_app:
- yield self._send_events_to_master(
+ await self._send_events_to_master(
store=self.store,
event_and_contexts=event_and_contexts,
backfilled=backfilled,
)
else:
- max_stream_id = yield self.storage.persistence.persist_events(
+ max_stream_id = await self.storage.persistence.persist_events(
event_and_contexts, backfilled=backfilled
)
@@ -2920,15 +2901,17 @@ class FederationHandler(BaseHandler):
if not backfilled: # Never notify for backfilled events
for event, _ in event_and_contexts:
- yield self._notify_persisted_event(event, max_stream_id)
+ await self._notify_persisted_event(event, max_stream_id)
- def _notify_persisted_event(self, event, max_stream_id):
+ async def _notify_persisted_event(
+ self, event: EventBase, max_stream_id: int
+ ) -> None:
"""Checks to see if notifier/pushers should be notified about the
event or not.
Args:
- event (FrozenEvent)
- max_stream_id (int): The max_stream_id returned by persist_events
+ event:
+ max_stream_id: The max_stream_id returned by persist_events
"""
extra_users = []
@@ -2952,29 +2935,29 @@ class FederationHandler(BaseHandler):
event, event_stream_id, max_stream_id, extra_users=extra_users
)
- return self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id)
+ await self.pusher_pool.on_new_notifications(event_stream_id, max_stream_id)
- def _clean_room_for_join(self, room_id):
+ async def _clean_room_for_join(self, room_id: str) -> None:
"""Called to clean up any data in DB for a given room, ready for the
server to join the room.
Args:
- room_id (str)
+ room_id
"""
if self.config.worker_app:
- return self._clean_room_for_join_client(room_id)
+ await self._clean_room_for_join_client(room_id)
else:
- return self.store.clean_room_for_join(room_id)
+ await self.store.clean_room_for_join(room_id)
- def user_joined_room(self, user, room_id):
+ async def user_joined_room(self, user: UserID, room_id: str) -> None:
"""Called when a new user has joined the room
"""
if self.config.worker_app:
- return self._notify_user_membership_change(
+ await self._notify_user_membership_change(
room_id=room_id, user_id=user.to_string(), change="joined"
)
else:
- return defer.succeed(user_joined_room(self.distributor, user, room_id))
+ user_joined_room(self.distributor, user, room_id)
@defer.inlineCallbacks
def get_room_complexity(self, remote_room_hosts, room_id):
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 319565510f..ad22415782 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -63,7 +63,7 @@ def _create_rerouter(func_name):
return f
-class GroupsLocalHandler(object):
+class GroupsLocalWorkerHandler(object):
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
@@ -81,40 +81,17 @@ class GroupsLocalHandler(object):
self.profile_handler = hs.get_profile_handler()
- # Ensure attestations get renewed
- hs.get_groups_attestation_renewer()
-
# The following functions merely route the query to the local groups server
# or federation depending on if the group is local or remote
get_group_profile = _create_rerouter("get_group_profile")
- update_group_profile = _create_rerouter("update_group_profile")
get_rooms_in_group = _create_rerouter("get_rooms_in_group")
-
get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
-
- add_room_to_group = _create_rerouter("add_room_to_group")
- update_room_in_group = _create_rerouter("update_room_in_group")
- remove_room_from_group = _create_rerouter("remove_room_from_group")
-
- update_group_summary_room = _create_rerouter("update_group_summary_room")
- delete_group_summary_room = _create_rerouter("delete_group_summary_room")
-
- update_group_category = _create_rerouter("update_group_category")
- delete_group_category = _create_rerouter("delete_group_category")
get_group_category = _create_rerouter("get_group_category")
get_group_categories = _create_rerouter("get_group_categories")
-
- update_group_summary_user = _create_rerouter("update_group_summary_user")
- delete_group_summary_user = _create_rerouter("delete_group_summary_user")
-
- update_group_role = _create_rerouter("update_group_role")
- delete_group_role = _create_rerouter("delete_group_role")
get_group_role = _create_rerouter("get_group_role")
get_group_roles = _create_rerouter("get_group_roles")
- set_group_join_policy = _create_rerouter("set_group_join_policy")
-
@defer.inlineCallbacks
def get_group_summary(self, group_id, requester_user_id):
"""Get the group summary for a group.
@@ -170,6 +147,144 @@ class GroupsLocalHandler(object):
return res
@defer.inlineCallbacks
+ def get_users_in_group(self, group_id, requester_user_id):
+ """Get users in a group
+ """
+ if self.is_mine_id(group_id):
+ res = yield self.groups_server_handler.get_users_in_group(
+ group_id, requester_user_id
+ )
+ return res
+
+ group_server_name = get_domain_from_id(group_id)
+
+ try:
+ res = yield self.transport_client.get_users_in_group(
+ get_domain_from_id(group_id), group_id, requester_user_id
+ )
+ except HttpResponseException as e:
+ raise e.to_synapse_error()
+ except RequestSendFailed:
+ raise SynapseError(502, "Failed to contact group server")
+
+ chunk = res["chunk"]
+ valid_entries = []
+ for entry in chunk:
+ g_user_id = entry["user_id"]
+ attestation = entry.pop("attestation", {})
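+ # Users on a different domain to the group server vouch for their
+ # membership with an attestation; entries whose attestation cannot
+ # be verified are dropped.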
+ try:
+ if get_domain_from_id(g_user_id) != group_server_name:
+ yield self.attestations.verify_attestation(
+ attestation,
+ group_id=group_id,
+ user_id=g_user_id,
+ server_name=get_domain_from_id(g_user_id),
+ )
+ valid_entries.append(entry)
+ except Exception as e:
+ logger.info("Failed to verify user is in group: %s", e)
+
+ res["chunk"] = valid_entries
+
+ return res
+
+ @defer.inlineCallbacks
+ def get_joined_groups(self, user_id):
+ group_ids = yield self.store.get_joined_groups(user_id)
+ return {"groups": group_ids}
+
+ @defer.inlineCallbacks
+ def get_publicised_groups_for_user(self, user_id):
+ if self.hs.is_mine_id(user_id):
+ result = yield self.store.get_publicised_groups_for_user(user_id)
+
+ # Check AS associated groups for this user - this depends on the
+ # RegExps in the AS registration file (under `users`)
+ for app_service in self.store.get_app_services():
+ result.extend(app_service.get_groups_for_user(user_id))
+
+ return {"groups": result}
+ else:
+ try:
+ bulk_result = yield self.transport_client.bulk_get_publicised_groups(
+ get_domain_from_id(user_id), [user_id]
+ )
+ except HttpResponseException as e:
+ raise e.to_synapse_error()
+ except RequestSendFailed:
+ raise SynapseError(502, "Failed to contact group server")
+
+ result = bulk_result.get("users", {}).get(user_id)
+ # TODO: Verify attestations
+ return {"groups": result}
+
+ @defer.inlineCallbacks
+ def bulk_get_publicised_groups(self, user_ids, proxy=True):
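+ # If `proxy` is False, all the user_ids must be local; otherwise
+ # remote users' groups are fetched from their home servers.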
+ destinations = {}
+ local_users = set()
+
+ for user_id in user_ids:
+ if self.hs.is_mine_id(user_id):
+ local_users.add(user_id)
+ else:
+ destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
+
+ if not proxy and destinations:
+ raise SynapseError(400, "Some user_ids are not local")
+
+ results = {}
+ failed_results = []
+ for destination, dest_user_ids in iteritems(destinations):
+ try:
+ r = yield self.transport_client.bulk_get_publicised_groups(
+ destination, list(dest_user_ids)
+ )
+ results.update(r["users"])
+ except Exception:
+ failed_results.extend(dest_user_ids)
+
+ for uid in local_users:
+ results[uid] = yield self.store.get_publicised_groups_for_user(uid)
+
+ # Check AS associated groups for this user - this depends on the
+ # RegExps in the AS registration file (under `users`)
+ for app_service in self.store.get_app_services():
+ results[uid].extend(app_service.get_groups_for_user(uid))
+
+ return {"users": results}
+
+
+class GroupsLocalHandler(GroupsLocalWorkerHandler):
+ def __init__(self, hs):
+ super(GroupsLocalHandler, self).__init__(hs)
+
+ # Ensure attestations get renewed
+ hs.get_groups_attestation_renewer()
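+ # NB: attestation renewal should only run on the master process,
+ # which is why it lives here rather than on GroupsLocalWorkerHandler.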
+
+ # The following functions merely route the query to the local groups server
+ # or federation depending on if the group is local or remote
+
+ update_group_profile = _create_rerouter("update_group_profile")
+
+ add_room_to_group = _create_rerouter("add_room_to_group")
+ update_room_in_group = _create_rerouter("update_room_in_group")
+ remove_room_from_group = _create_rerouter("remove_room_from_group")
+
+ update_group_summary_room = _create_rerouter("update_group_summary_room")
+ delete_group_summary_room = _create_rerouter("delete_group_summary_room")
+
+ update_group_category = _create_rerouter("update_group_category")
+ delete_group_category = _create_rerouter("delete_group_category")
+
+ update_group_summary_user = _create_rerouter("update_group_summary_user")
+ delete_group_summary_user = _create_rerouter("delete_group_summary_user")
+
+ update_group_role = _create_rerouter("update_group_role")
+ delete_group_role = _create_rerouter("delete_group_role")
+
+ set_group_join_policy = _create_rerouter("set_group_join_policy")
+
+ @defer.inlineCallbacks
def create_group(self, group_id, user_id, content):
"""Create a group
"""
@@ -220,48 +335,6 @@ class GroupsLocalHandler(object):
return res
@defer.inlineCallbacks
- def get_users_in_group(self, group_id, requester_user_id):
- """Get users in a group
- """
- if self.is_mine_id(group_id):
- res = yield self.groups_server_handler.get_users_in_group(
- group_id, requester_user_id
- )
- return res
-
- group_server_name = get_domain_from_id(group_id)
-
- try:
- res = yield self.transport_client.get_users_in_group(
- get_domain_from_id(group_id), group_id, requester_user_id
- )
- except HttpResponseException as e:
- raise e.to_synapse_error()
- except RequestSendFailed:
- raise SynapseError(502, "Failed to contact group server")
-
- chunk = res["chunk"]
- valid_entries = []
- for entry in chunk:
- g_user_id = entry["user_id"]
- attestation = entry.pop("attestation", {})
- try:
- if get_domain_from_id(g_user_id) != group_server_name:
- yield self.attestations.verify_attestation(
- attestation,
- group_id=group_id,
- user_id=g_user_id,
- server_name=get_domain_from_id(g_user_id),
- )
- valid_entries.append(entry)
- except Exception as e:
- logger.info("Failed to verify user is in group: %s", e)
-
- res["chunk"] = valid_entries
-
- return res
-
- @defer.inlineCallbacks
def join_group(self, group_id, user_id, content):
"""Request to join a group
"""
@@ -452,68 +525,3 @@ class GroupsLocalHandler(object):
group_id, user_id, membership="leave"
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
-
- @defer.inlineCallbacks
- def get_joined_groups(self, user_id):
- group_ids = yield self.store.get_joined_groups(user_id)
- return {"groups": group_ids}
-
- @defer.inlineCallbacks
- def get_publicised_groups_for_user(self, user_id):
- if self.hs.is_mine_id(user_id):
- result = yield self.store.get_publicised_groups_for_user(user_id)
-
- # Check AS associated groups for this user - this depends on the
- # RegExps in the AS registration file (under `users`)
- for app_service in self.store.get_app_services():
- result.extend(app_service.get_groups_for_user(user_id))
-
- return {"groups": result}
- else:
- try:
- bulk_result = yield self.transport_client.bulk_get_publicised_groups(
- get_domain_from_id(user_id), [user_id]
- )
- except HttpResponseException as e:
- raise e.to_synapse_error()
- except RequestSendFailed:
- raise SynapseError(502, "Failed to contact group server")
-
- result = bulk_result.get("users", {}).get(user_id)
- # TODO: Verify attestations
- return {"groups": result}
-
- @defer.inlineCallbacks
- def bulk_get_publicised_groups(self, user_ids, proxy=True):
- destinations = {}
- local_users = set()
-
- for user_id in user_ids:
- if self.hs.is_mine_id(user_id):
- local_users.add(user_id)
- else:
- destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
-
- if not proxy and destinations:
- raise SynapseError(400, "Some user_ids are not local")
-
- results = {}
- failed_results = []
- for destination, dest_user_ids in iteritems(destinations):
- try:
- r = yield self.transport_client.bulk_get_publicised_groups(
- destination, list(dest_user_ids)
- )
- results.update(r["users"])
- except Exception:
- failed_results.extend(dest_user_ids)
-
- for uid in local_users:
- results[uid] = yield self.store.get_publicised_groups_for_user(uid)
-
- # Check AS associated groups for this user - this depends on the
- # RegExps in the AS registration file (under `users`)
- for app_service in self.store.get_app_services():
- results[uid].extend(app_service.get_groups_for_user(uid))
-
- return {"users": results}
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 23f07832e7..94b5279aa6 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018, 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@ from synapse.api.errors import (
CodeMessageException,
Codes,
HttpResponseException,
+ ProxiedRequestError,
SynapseError,
)
from synapse.config.emailconfig import ThreepidBehaviour
@@ -51,14 +52,21 @@ class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
- self.http_client = SimpleHttpClient(hs)
+ self.hs = hs
+ self.http_client = hs.get_simple_http_client()
# We create a blacklisting instance of SimpleHttpClient for contacting identity
# servers specified by clients
self.blacklisting_http_client = SimpleHttpClient(
hs, ip_blacklist=hs.config.federation_ip_range_blacklist
)
self.federation_http_client = hs.get_http_client()
- self.hs = hs
+
+ self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
+ self.trust_any_id_server_just_for_testing_do_not_use = (
+ hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
+ )
+ self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls
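+ # Mapping from identity server name to the host to actually connect
+ # to, e.g. {"matrix.org": "id.internal.example.com"} (example values
+ # only).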
+ self._enable_lookup = hs.config.enable_3pid_lookup
@defer.inlineCallbacks
def threepid_from_creds(self, id_server, creds):
@@ -94,7 +102,15 @@ class IdentityHandler(BaseHandler):
query_params = {"sid": session_id, "client_secret": client_secret}
- url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid"
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
+
+ url = "https://%s%s" % (
+ id_server,
+ "/_matrix/identity/api/v1/3pid/getValidated3pid",
+ )
try:
data = yield self.http_client.get_json(url, query_params)
@@ -149,14 +165,24 @@ class IdentityHandler(BaseHandler):
if id_access_token is None:
use_v2 = False
+ # if we have a rewrite rule set for the identity server,
+ # apply it now, but only for sending the request (not
+ # storing in the database).
+ if id_server in self.rewrite_identity_server_urls:
+ id_server_host = self.rewrite_identity_server_urls[id_server]
+ else:
+ id_server_host = id_server
+
# Decide which API endpoint URLs to use
headers = {}
bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
if use_v2:
- bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
- headers["Authorization"] = create_id_access_token_header(id_access_token)
+ bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server_host,)
+ headers["Authorization"] = create_id_access_token_header(
+ id_access_token
+ )
else:
- bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)
+ bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server_host,)
try:
# Use the blacklisting http client as this call is only to identity servers
@@ -263,6 +289,16 @@ class IdentityHandler(BaseHandler):
)
headers = {b"Authorization": auth_headers}
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ #
+ # Note that destination_is has to be the real id_server, not
+ # the server we connect to.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
+
+ url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
+
try:
# Use the blacklisting http client as this call is only to identity servers
# provided by a client
@@ -400,6 +436,12 @@ class IdentityHandler(BaseHandler):
"client_secret": client_secret,
"send_attempt": send_attempt,
}
+
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
+
if next_link:
params["next_link"] = next_link
@@ -466,6 +508,10 @@ class IdentityHandler(BaseHandler):
"details and update your config file."
)
+ # if we have a rewrite rule set for the identity server,
+ # apply it now.
+ if id_server in self.rewrite_identity_server_urls:
+ id_server = self.rewrite_identity_server_urls[id_server]
try:
data = yield self.http_client.post_json_get_json(
id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",
@@ -566,6 +612,89 @@ class IdentityHandler(BaseHandler):
logger.warning("Error contacting msisdn account_threepid_delegate: %s", e)
raise SynapseError(400, "Error contacting the identity server")
+ # TODO: The following two methods are used for proxying IS requests using
+ # the CS API. They should be consolidated with those in RoomMemberHandler
+ # https://github.com/matrix-org/synapse-dinsic/issues/25
+
+ @defer.inlineCallbacks
+ def proxy_lookup_3pid(self, id_server, medium, address):
+ """Looks up a 3pid in the passed identity server.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ medium (str): The type of the third party identifier (e.g. "email").
+ address (str): The third party identifier (e.g. "foo@example.com").
+
+ Returns:
+ Deferred[dict]: The result of the lookup. See
+ https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
+ for details
+ """
+ if not self._enable_lookup:
+ raise AuthError(
+ 403, "Looking up third-party identifiers is denied from this server"
+ )
+
+ target = self.rewrite_identity_server_urls.get(id_server, id_server)
+
+ try:
+ data = yield self.http_client.get_json(
+ "https://%s/_matrix/identity/api/v1/lookup" % (target,),
+ {"medium": medium, "address": address},
+ )
+
+ if "mxid" in data:
+ if "signatures" not in data:
+ raise AuthError(401, "No signatures on 3pid binding")
+ yield self._verify_any_signature(data, id_server)
+
+ except HttpResponseException as e:
+ logger.info("Proxied lookup failed: %r", e)
+ raise e.to_synapse_error()
+ except IOError as e:
+ logger.info("Failed to contact %r: %s", id_server, e)
+ raise ProxiedRequestError(503, "Failed to contact identity server")
+
+ defer.returnValue(data)
+
+ @defer.inlineCallbacks
+ def proxy_bulk_lookup_3pid(self, id_server, threepids):
+ """Looks up given 3pids in the passed identity server.
+
+ Args:
+ id_server (str): The server name (including port, if required)
+ of the identity server to use.
+ threepids ([[str, str]]): The third party identifiers to lookup, as
+ a list of two-element lists of strings ([medium, address]).
+
+ Returns:
+ Deferred[dict]: The result of the lookup. See
+ https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
+ for details
+ """
+ if not self._enable_lookup:
+ raise AuthError(
+ 403, "Looking up third-party identifiers is denied from this server"
+ )
+
+ target = self.rewrite_identity_server_urls.get(id_server, id_server)
+
+ try:
+ data = yield self.http_client.post_json_get_json(
+ "https://%s/_matrix/identity/api/v1/bulk_lookup" % (target,),
+ {"threepids": threepids},
+ )
+
+ except HttpResponseException as e:
+ logger.info("Proxied lookup failed: %r", e)
+ raise e.to_synapse_error()
+ except IOError as e:
+ logger.info("Failed to contact %r: %s", id_server, e)
+ raise ProxiedRequestError(503, "Failed to contact identity server")
+
+ defer.returnValue(data)
+
@defer.inlineCallbacks
def lookup_3pid(self, id_server, medium, address, id_access_token=None):
"""Looks up a 3pid in the passed identity server.
@@ -581,6 +710,9 @@ class IdentityHandler(BaseHandler):
Returns:
str|None: the matrix ID of the 3pid, or None if it is not recognized.
"""
+ # Rewrite id_server URL if necessary
+ id_server = self._get_id_server_target(id_server)
+
if id_access_token is not None:
try:
results = yield self._lookup_3pid_v2(
@@ -618,7 +750,7 @@ class IdentityHandler(BaseHandler):
str: the matrix ID of the 3pid, or None if it is not recognized.
"""
try:
- data = yield self.blacklisting_http_client.get_json(
+ data = yield self.http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
{"medium": medium, "address": address},
)
@@ -651,7 +783,7 @@ class IdentityHandler(BaseHandler):
"""
# Check what hashing details are supported by this identity server
try:
- hash_details = yield self.blacklisting_http_client.get_json(
+ hash_details = yield self.http_client.get_json(
"%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
{"access_token": id_access_token},
)
@@ -669,7 +801,7 @@ class IdentityHandler(BaseHandler):
400,
"Non-dict object from %s%s during v2 hash_details request: %s"
% (id_server_scheme, id_server, hash_details),
- )
+ )
# Extract information from hash_details
supported_lookup_algorithms = hash_details.get("algorithms")
@@ -684,7 +816,7 @@ class IdentityHandler(BaseHandler):
400,
"Invalid hash details received from identity server %s%s: %s"
% (id_server_scheme, id_server, hash_details),
- )
+ )
# Check if any of the supported lookup algorithms are present
if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
@@ -718,7 +850,7 @@ class IdentityHandler(BaseHandler):
headers = {"Authorization": create_id_access_token_header(id_access_token)}
try:
- lookup_results = yield self.blacklisting_http_client.post_json_get_json(
+ lookup_results = yield self.http_client.post_json_get_json(
"%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
{
"addresses": [lookup_value],
@@ -726,7 +858,7 @@ class IdentityHandler(BaseHandler):
"pepper": lookup_pepper,
},
headers=headers,
- )
+ )
except TimeoutError:
raise SynapseError(500, "Timed out contacting identity server")
except Exception as e:
@@ -750,14 +882,15 @@ class IdentityHandler(BaseHandler):
def _verify_any_signature(self, data, server_hostname):
if server_hostname not in data["signatures"]:
raise AuthError(401, "No signature from server %s" % (server_hostname,))
+
for key_name, signature in data["signatures"][server_hostname].items():
- try:
- key_data = yield self.blacklisting_http_client.get_json(
- "%s%s/_matrix/identity/api/v1/pubkey/%s"
- % (id_server_scheme, server_hostname, key_name)
- )
- except TimeoutError:
- raise SynapseError(500, "Timed out contacting identity server")
+ target = self.rewrite_identity_server_urls.get(
+ server_hostname, server_hostname
+ )
+
+ key_data = yield self.http_client.get_json(
+ "https://%s/_matrix/identity/api/v1/pubkey/%s" % (target, key_name)
+ )
if "public_key" not in key_data:
raise AuthError(
401, "No public key named %s from %s" % (key_name, server_hostname)
@@ -771,6 +904,23 @@ class IdentityHandler(BaseHandler):
)
return
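+ # If we get here, none of the keys listed for this server produced a
+ # valid signature, so reject the data.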
+ raise AuthError(401, "No signature from server %s" % (server_hostname,))
+
+ def _get_id_server_target(self, id_server):
+ """Looks up an id_server's actual http endpoint
+
+ Args:
+ id_server (str): the server name to lookup.
+
+ Returns:
+ the host to connect to, with any rewrite rule applied.
+ """
+ if id_server in self.rewrite_identity_server_urls:
+ return self.rewrite_identity_server_urls[id_server]
+
+ return id_server
+
+
@defer.inlineCallbacks
def ask_id_server_for_third_party_invite(
self,
@@ -831,6 +981,9 @@ class IdentityHandler(BaseHandler):
"sender_avatar_url": inviter_avatar_url,
}
+ # Rewrite the identity server URL if necessary
+ id_server = self._get_id_server_target(id_server)
+
# Add the identity service access token to the JSON body and use the v2
# Identity Service endpoints if id_access_token is present
data = None
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index bdf16c84d3..be6ae18a92 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -932,10 +932,9 @@ class EventCreationHandler(object):
# way? If we have been invited by a remote server, we need
# to get them to sign the event.
- returned_invite = yield federation_handler.send_invite(
- invitee.domain, event
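+ # send_invite is now a coroutine, so wrap it in ensureDeferred
+ # before yielding it from this inlineCallbacks function.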
+ returned_invite = yield defer.ensureDeferred(
+ federation_handler.send_invite(invitee.domain, event)
)
-
event.unsigned.pop("room_state", None)
# TODO: Make sure the signatures actually are correct.
diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py
new file mode 100644
index 0000000000..d06b110269
--- /dev/null
+++ b/synapse/handlers/password_policy.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import re
+
+from synapse.api.errors import Codes, PasswordRefusedError
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyHandler(object):
+ def __init__(self, hs):
+ self.policy = hs.config.password_policy
+ self.enabled = hs.config.password_policy_enabled
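+
+ # `policy` mirrors the `password_policy` section of the homeserver
+ # config; a hypothetical example:
+ #
+ # password_policy:
+ # enabled: true
+ # minimum_length: 15
+ # require_digit: true
+ # require_symbol: true
+ # require_uppercase: true
+ # require_lowercase: true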
+
+ # Regexps for the spec'd policy parameters.
+ self.regexp_digit = re.compile("[0-9]")
+ self.regexp_symbol = re.compile("[^a-zA-Z0-9]")
+ self.regexp_uppercase = re.compile("[A-Z]")
+ self.regexp_lowercase = re.compile("[a-z]")
+
+ def validate_password(self, password):
+ """Checks whether a given password complies with the server's policy.
+
+ Args:
+ password (str): The password to check against the server's policy.
+
+ Raises:
+ PasswordRefusedError: The password doesn't comply with the server's policy.
+ """
+
+ if not self.enabled:
+ return
+
+ minimum_accepted_length = self.policy.get("minimum_length", 0)
+ if len(password) < minimum_accepted_length:
+ raise PasswordRefusedError(
+ msg=(
+ "The password must be at least %d characters long"
+ % minimum_accepted_length
+ ),
+ errcode=Codes.PASSWORD_TOO_SHORT,
+ )
+
+ if (
+ self.policy.get("require_digit", False)
+ and self.regexp_digit.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one digit",
+ errcode=Codes.PASSWORD_NO_DIGIT,
+ )
+
+ if (
+ self.policy.get("require_symbol", False)
+ and self.regexp_symbol.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one symbol",
+ errcode=Codes.PASSWORD_NO_SYMBOL,
+ )
+
+ if (
+ self.policy.get("require_uppercase", False)
+ and self.regexp_uppercase.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one uppercase letter",
+ errcode=Codes.PASSWORD_NO_UPPERCASE,
+ )
+
+ if (
+ self.policy.get("require_lowercase", False)
+ and self.regexp_lowercase.search(password) is None
+ ):
+ raise PasswordRefusedError(
+ msg="The password must include at least one lowercase letter",
+ errcode=Codes.PASSWORD_NO_LOWERCASE,
+ )
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index f9579d69ee..75227ae34b 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,8 +17,11 @@
import logging
from six import raise_from
+from six.moves import range
-from twisted.internet import defer
+from signedjson.sign import sign_json
+
+from twisted.internet import defer, reactor
from synapse.api.errors import (
AuthError,
@@ -27,6 +31,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
+from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, get_domain_from_id
@@ -46,6 +51,8 @@ class BaseProfileHandler(BaseHandler):
subclass MasterProfileHandler
"""
+ PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000
+
def __init__(self, hs):
super(BaseProfileHandler, self).__init__(hs)
@@ -56,6 +63,87 @@ class BaseProfileHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
+ self.http_client = hs.get_simple_http_client()
+
+ self.max_avatar_size = hs.config.max_avatar_size
+ self.allowed_avatar_mimetypes = hs.config.allowed_avatar_mimetypes
+
+ if hs.config.worker_app is None:
+ self.clock.looping_call(
+ self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS
+ )
+
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ reactor.callWhenRunning(self._assign_profile_replication_batches)
+ reactor.callWhenRunning(self._replicate_profiles)
+ # Add a looping call to replicate_profiles: this handles retries
+ # if the replication is unsuccessful when the user updated their
+ # profile.
+ self.clock.looping_call(
+ self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL
+ )
+
+ @defer.inlineCallbacks
+ def _assign_profile_replication_batches(self):
+ """If no profile replication has been done yet, allocate replication batch
+ numbers to each profile to start the replication process.
+ """
+ logger.info("Assigning profile batch numbers...")
+ total = 0
+ while True:
+ assigned = yield self.store.assign_profile_batch()
+ total += assigned
+ if assigned == 0:
+ break
+ logger.info("Assigned %d profile batch numbers", total)
+
+ @defer.inlineCallbacks
+ def _replicate_profiles(self):
+ """If any profile data has been updated and not pushed to the replication targets,
+ replicate it.
+ """
+ host_batches = yield self.store.get_replication_hosts()
+ latest_batch = yield self.store.get_latest_profile_replication_batch_number()
+ if latest_batch is None:
+ latest_batch = -1
+ for repl_host in self.hs.config.replicate_user_profiles_to:
+ if repl_host not in host_batches:
+ host_batches[repl_host] = -1
+ try:
+ for i in range(host_batches[repl_host] + 1, latest_batch + 1):
+ yield self._replicate_host_profile_batch(repl_host, i)
+ except Exception:
+ logger.exception(
+ "Exception while replicating to %s: aborting for now", repl_host
+ )
+
+ @defer.inlineCallbacks
+ def _replicate_host_profile_batch(self, host, batchnum):
+ logger.info("Replicating profile batch %d to %s", batchnum, host)
+ batch_rows = yield self.store.get_profile_batch(batchnum)
+ batch = {
+ UserID(r["user_id"], self.hs.hostname).to_string(): (
+ {"display_name": r["displayname"], "avatar_url": r["avatar_url"]}
+ if r["active"]
+ else None
+ )
+ for r in batch_rows
+ }
+
+ url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,)
+ body = {"batchnum": batchnum, "batch": batch, "origin_server": self.hs.hostname}
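+ # Sign the body with our signing key so the receiving host can verify
+ # that the batch really came from this homeserver.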
+ signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
+ try:
+ yield self.http_client.post_json_get_json(url, signed_body)
+ yield self.store.update_replication_batch_for_host(host, batchnum)
+ logger.info("Sucessfully replicated profile batch %d to %s", batchnum, host)
+ except Exception:
+ # This will get retried when the looping call next comes around
+ logger.exception(
+ "Failed to replicate profile batch %d to %s", batchnum, host
+ )
+ raise
+
@defer.inlineCallbacks
def get_profile(self, user_id):
target_user = UserID.from_string(user_id)
@@ -154,9 +242,16 @@ class BaseProfileHandler(BaseHandler):
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this homeserver")
- if not by_admin and target_user != requester.user:
+ if not by_admin and requester and target_user != requester.user:
raise AuthError(400, "Cannot set another user's displayname")
+ if not by_admin and self.hs.config.disable_set_displayname:
+ profile = yield self.store.get_profileinfo(target_user.localpart)
+ if profile.display_name:
+ raise SynapseError(
+ 400, "Changing displayname is disabled on this server"
+ )
+
if len(new_displayname) > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN,)
@@ -165,7 +260,17 @@ class BaseProfileHandler(BaseHandler):
if new_displayname == "":
new_displayname = None
- yield self.store.set_profile_displayname(target_user.localpart, new_displayname)
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ cur_batchnum = (
+ yield self.store.get_latest_profile_replication_batch_number()
+ )
+ new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+ else:
+ new_batchnum = None
+
+ yield self.store.set_profile_displayname(
+ target_user.localpart, new_displayname, new_batchnum
+ )
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(target_user.localpart)
@@ -173,7 +278,39 @@ class BaseProfileHandler(BaseHandler):
target_user.to_string(), profile
)
- yield self._update_join_states(requester, target_user)
+ if requester:
+ yield self._update_join_states(requester, target_user)
+
+ # start a profile replication push
+ run_in_background(self._replicate_profiles)
+
+ @defer.inlineCallbacks
+ def set_active(self, target_user, active, hide):
+ """
+ Sets the 'active' flag on a user profile. If set to false, the user
+ account is considered deactivated or hidden.
+
+ If 'hide' is true, then we interpret active=False as a request to try to
+ hide the user rather than deactivating it. This means withholding the
+ profile from replication (and marking it as inactive) rather than clearing
+ the profile from the HS DB. Note that unlike set_displayname and
+ set_avatar_url, this does *not* perform authorization checks! This is
+ because the only place it's used currently is in account deactivation
+ where we've already done these checks anyway.
+ """
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ cur_batchnum = (
+ yield self.store.get_latest_profile_replication_batch_number()
+ )
+ new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+ else:
+ new_batchnum = None
+ yield self.store.set_profile_active(
+ target_user.localpart, active, hide, new_batchnum
+ )
+
+ # start a profile replication push
+ run_in_background(self._replicate_profiles)
@defer.inlineCallbacks
def get_avatar_url(self, target_user):
@@ -212,12 +349,59 @@ class BaseProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's avatar_url")
+ if not by_admin and self.hs.config.disable_set_avatar_url:
+ profile = yield self.store.get_profileinfo(target_user.localpart)
+ if profile.avatar_url:
+ raise SynapseError(
+ 400, "Changing avatar url is disabled on this server"
+ )
+
+ if len(self.hs.config.replicate_user_profiles_to) > 0:
+ cur_batchnum = (
+ yield self.store.get_latest_profile_replication_batch_number()
+ )
+ new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
+ else:
+ new_batchnum = None
+
if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
raise SynapseError(
400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,)
)
- yield self.store.set_profile_avatar_url(target_user.localpart, new_avatar_url)
+ # Enforce a max avatar size if one is defined
+ if self.max_avatar_size or self.allowed_avatar_mimetypes:
+ media_id = self._validate_and_parse_media_id_from_avatar_url(new_avatar_url)
+
+ # Check that this media exists locally
+ media_info = yield self.store.get_local_media(media_id)
+ if not media_info:
+ raise SynapseError(
+ 400, "Unknown media id supplied", errcode=Codes.NOT_FOUND
+ )
+
+ # Ensure avatar does not exceed max allowed avatar size
+ media_size = media_info["media_length"]
+ if self.max_avatar_size and media_size > self.max_avatar_size:
+ raise SynapseError(
+ 400,
+ "Avatars must be less than %s bytes in size"
+ % (self.max_avatar_size,),
+ errcode=Codes.TOO_LARGE,
+ )
+
+ # Ensure the avatar's file type is allowed
+ if (
+ self.allowed_avatar_mimetypes
+ and media_info["media_type"] not in self.allowed_avatar_mimetypes
+ ):
+ raise SynapseError(
+ 400, "Avatar file type '%s' not allowed" % media_info["media_type"]
+ )
+
+ yield self.store.set_profile_avatar_url(
+ target_user.localpart, new_avatar_url, new_batchnum
+ )
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(target_user.localpart)
@@ -227,6 +411,23 @@ class BaseProfileHandler(BaseHandler):
yield self._update_join_states(requester, target_user)
+ # start a profile replication push
+ run_in_background(self._replicate_profiles)
+
+ def _validate_and_parse_media_id_from_avatar_url(self, mxc):
+ """Validate and parse a provided avatar url and return the local media id
+
+ Args:
+ mxc (str): An mxc URL
+
+ Returns:
+ str: The ID of the media
+ """
+ avatar_pieces = mxc.split("/")
+ if len(avatar_pieces) != 4 or avatar_pieces[0] != "mxc:":
+ raise SynapseError(400, "Invalid avatar URL '%s' supplied" % mxc)
+ return avatar_pieces[-1]
+
@defer.inlineCallbacks
def on_profile_query(self, args):
user = UserID.from_string(args["user_id"])
@@ -282,7 +483,7 @@ class BaseProfileHandler(BaseHandler):
@defer.inlineCallbacks
def check_profile_query_allowed(self, target_user, requester=None):
"""Checks whether a profile query is allowed. If the
- 'require_auth_for_profile_requests' config flag is set to True and a
+ 'limit_profile_requests_to_known_users' config flag is set to True and a
'requester' is provided, the query is only allowed if the two users
share a room.
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 7ffc194f0c..696d90996a 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -49,6 +49,7 @@ class RegistrationHandler(BaseHandler):
self._auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
+ self.http_client = hs.get_simple_http_client()
self.identity_handler = self.hs.get_handlers().identity_handler
self.ratelimiter = hs.get_registration_ratelimiter()
@@ -61,6 +62,8 @@ class RegistrationHandler(BaseHandler):
)
self._server_notices_mxid = hs.config.server_notices_mxid
+ self._show_in_user_directory = self.hs.config.show_users_in_user_directory
+
if hs.config.worker_app:
self._register_client = ReplicationRegisterServlet.make_client(hs)
self._register_device_client = RegisterDeviceReplicationServlet.make_client(
@@ -203,6 +206,11 @@ class RegistrationHandler(BaseHandler):
address=address,
)
+ if default_display_name:
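+ # There is no requester at registration time (hence the None), so
+ # by_admin=True is passed to skip the requester checks.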
+ yield self.profile_handler.set_displayname(
+ user, None, default_display_name, by_admin=True
+ )
+
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(localpart)
yield self.user_directory_handler.handle_local_profile_change(
@@ -233,6 +241,10 @@ class RegistrationHandler(BaseHandler):
address=address,
)
+ yield self.profile_handler.set_displayname(
+ user, None, default_display_name, by_admin=True
+ )
+
# Successfully registered
break
except SynapseError:
@@ -262,6 +274,14 @@ class RegistrationHandler(BaseHandler):
# Bind email to new account
yield self._register_email_threepid(user_id, threepid_dict, None)
+ # Prevent the new user from showing up in the user directory if the server
+ # mandates it.
+ if not self._show_in_user_directory:
+ yield self.store.add_account_data_for_user(
+ user_id, "im.vector.hide_profile", {"hide_profile": True}
+ )
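+ # hide=True withholds the profile from replication rather than
+ # clearing it (see BaseProfileHandler.set_active).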
+ yield self.profile_handler.set_active(user, False, True)
+
return user_id
@defer.inlineCallbacks
@@ -328,7 +348,9 @@ class RegistrationHandler(BaseHandler):
yield self._auto_join_rooms(user_id)
@defer.inlineCallbacks
- def appservice_register(self, user_localpart, as_token):
+ def appservice_register(self, user_localpart, as_token, password, display_name):
+ # FIXME: this should be factored out and merged with normal register()
+
user = UserID(user_localpart, self.hs.hostname)
user_id = user.to_string()
service = self.store.get_app_service_by_token(as_token)
@@ -347,12 +369,29 @@ class RegistrationHandler(BaseHandler):
user_id, allowed_appservice=service
)
+ password_hash = ""
+ if password:
+ password_hash = yield self._auth_handler.hash(password)
+
+ display_name = display_name or user.localpart
+
yield self.register_with_store(
user_id=user_id,
- password_hash="",
+ password_hash=password_hash,
appservice_id=service_id,
- create_profile_with_displayname=user.localpart,
+ create_profile_with_displayname=display_name,
)
+
+ yield self.profile_handler.set_displayname(
+ user, None, display_name, by_admin=True
+ )
+
+ if self.hs.config.user_directory_search_all_users:
+ profile = yield self.store.get_profileinfo(user_localpart)
+ yield self.user_directory_handler.handle_local_profile_change(
+ user_id, profile
+ )
+
return user_id
def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
@@ -380,6 +419,39 @@ class RegistrationHandler(BaseHandler):
)
@defer.inlineCallbacks
+ def shadow_register(self, localpart, display_name, auth_result, params):
+ """Invokes the current registration on another server, using
+ shared secret registration, passing in any auth_results from
+ other registration UI auth flows (e.g. validated 3pids)
+ Useful for setting up shadow/backup accounts on a parallel deployment.
+ """
+
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
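+ # The shadow_server config is expected to look something like
+ # (illustrative values only):
+ # shadow_server:
+ # hs_url: "https://shadow.example.com"
+ # as_token: "<application service token>"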
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/register?access_token=%s" % (shadow_hs_url, as_token),
+ {
+ # XXX: auth_result is an unspecified extension for shadow registration
+ "auth_result": auth_result,
+ # XXX: another unspecified extension for shadow registration to ensure
+ # that the displayname is correctly set by the master server
+ "display_name": display_name,
+ "username": localpart,
+ "password": params.get("password"),
+ "bind_email": params.get("bind_email"),
+ "bind_msisdn": params.get("bind_msisdn"),
+ "device_id": params.get("device_id"),
+ "initial_device_display_name": params.get(
+ "initial_device_display_name"
+ ),
+ "inhibit_login": False,
+ "access_token": as_token,
+ },
+ )
+
+ @defer.inlineCallbacks
def _generate_user_id(self):
if self._next_generated_user_id is None:
with (yield self._generate_user_id_linearizer.queue(())):
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index b609a65f47..ccf0e962f6 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -64,12 +64,14 @@ class RoomCreationHandler(BaseHandler):
"history_visibility": "shared",
"original_invitees_have_ops": False,
"guest_can_join": True,
+ "encryption_alg": "m.megolm.v1.aes-sha2",
},
RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": "shared",
"original_invitees_have_ops": True,
"guest_can_join": True,
+ "encryption_alg": "m.megolm.v1.aes-sha2",
},
RoomCreationPreset.PUBLIC_CHAT: {
"join_rules": JoinRules.PUBLIC,
@@ -259,7 +261,7 @@ class RoomCreationHandler(BaseHandler):
for v in ("invite", "events_default"):
current = int(pl_content.get(v, 0))
if current < restricted_level:
- logger.info(
+ logger.debug(
"Setting level for %s in %s to %i (was %i)",
v,
old_room_id,
@@ -269,7 +271,7 @@ class RoomCreationHandler(BaseHandler):
pl_content[v] = restricted_level
updated = True
else:
- logger.info("Not setting level for %s (already %i)", v, current)
+ logger.debug("Not setting level for %s (already %i)", v, current)
if updated:
try:
@@ -296,7 +298,7 @@ class RoomCreationHandler(BaseHandler):
EventTypes.Aliases, events_default
)
- logger.info("Setting correct PLs in new room to %s", new_pl_content)
+ logger.debug("Setting correct PLs in new room to %s", new_pl_content)
yield self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
@@ -332,7 +334,19 @@ class RoomCreationHandler(BaseHandler):
"""
user_id = requester.user.to_string()
- if not self.spam_checker.user_may_create_room(user_id):
+ if (
+ self._server_notices_mxid is not None
+ and requester.user.to_string() == self._server_notices_mxid
+ ):
+ # allow the server notices mxid to create rooms
+ is_requester_admin = True
+
+ else:
+ is_requester_admin = yield self.auth.is_server_admin(requester.user)
+
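+ # This is the room clone/upgrade path, hence cloning=True and the
+ # empty invite lists: the spam checker can treat cloning differently
+ # from a fresh room creation.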
+ if not is_requester_admin and not self.spam_checker.user_may_create_room(
+ user_id, invite_list=[], third_party_invite_list=[], cloning=True
+ ):
raise SynapseError(403, "You are not permitted to create rooms")
creation_content = {
@@ -579,12 +593,22 @@ class RoomCreationHandler(BaseHandler):
# Check whether the third party rules allows/changes the room create
# request.
- yield self.third_party_event_rules.on_create_room(
+ event_allowed = yield self.third_party_event_rules.on_create_room(
requester, config, is_requester_admin=is_requester_admin
)
+ if not event_allowed:
+ raise SynapseError(
+ 403, "You are not permitted to create rooms", Codes.FORBIDDEN
+ )
+
+ invite_list = config.get("invite", [])
+ invite_3pid_list = config.get("invite_3pid", [])
if not is_requester_admin and not self.spam_checker.user_may_create_room(
- user_id
+ user_id,
+ invite_list=invite_list,
+ third_party_invite_list=invite_3pid_list,
+ cloning=False,
):
raise SynapseError(403, "You are not permitted to create rooms")
@@ -619,7 +643,6 @@ class RoomCreationHandler(BaseHandler):
else:
room_alias = None
- invite_list = config.get("invite", [])
for i in invite_list:
try:
uid = UserID.from_string(i)
@@ -641,8 +664,6 @@ class RoomCreationHandler(BaseHandler):
% (user_id,),
)
- invite_3pid_list = config.get("invite_3pid", [])
-
visibility = config.get("visibility", None)
is_public = visibility == "public"
@@ -732,6 +753,7 @@ class RoomCreationHandler(BaseHandler):
"invite",
ratelimit=False,
content=content,
+ new_room=True,
)
for invite_3pid in invite_3pid_list:
@@ -747,6 +769,7 @@ class RoomCreationHandler(BaseHandler):
id_server,
requester,
txn_id=None,
+ new_room=True,
id_access_token=id_access_token,
)
@@ -782,7 +805,7 @@ class RoomCreationHandler(BaseHandler):
@defer.inlineCallbacks
def send(etype, content, **kwargs):
event = create(etype, content, **kwargs)
- logger.info("Sending %s in new room", etype)
+ logger.debug("Sending %s in new room", etype)
yield self.event_creation_handler.create_and_send_nonmember_event(
creator, event, ratelimit=False
)
@@ -796,7 +819,7 @@ class RoomCreationHandler(BaseHandler):
creation_content.update({"creator": creator_id})
yield send(etype=EventTypes.Create, content=creation_content)
- logger.info("Sending %s in new room", EventTypes.Member)
+ logger.debug("Sending %s in new room", EventTypes.Member)
yield self.room_member_handler.update_membership(
creator,
creator.user,
@@ -804,6 +827,7 @@ class RoomCreationHandler(BaseHandler):
"join",
ratelimit=False,
content=creator_join_profile,
+ new_room=True,
)
# We treat the power levels override specially as this needs to be one
@@ -869,6 +893,13 @@ class RoomCreationHandler(BaseHandler):
for (etype, state_key), content in initial_state.items():
yield send(etype=etype, state_key=state_key, content=content)
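+ # The private-chat presets above set "encryption_alg", so rooms
+ # created from them have encryption enabled from the start via an
+ # m.room.encryption state event.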
+ if "encryption_alg" in config:
+ yield send(
+ etype=EventTypes.Encryption,
+ state_key="",
+ content={"algorithm": config["encryption_alg"]},
+ )
+
@defer.inlineCallbacks
def _generate_room_id(
self, creator_id: str, is_public: str, room_version: RoomVersion,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 15e8aa5249..decef944ff 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -24,13 +24,20 @@ from twisted.internet import defer
from synapse import types
from synapse.api.constants import EventTypes, Membership
+from synapse.api.ratelimiting import Ratelimiter
+from synapse.api.errors import (
+ AuthError,
+ Codes,
+ HttpResponseException,
+ SynapseError,
+)
+from synapse.handlers.identity import LookupAlgorithm, create_id_access_token_header
+from synapse.http.client import SimpleHttpClient
-from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.types import Collection, RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
-from ._base import BaseHandler
-
logger = logging.getLogger(__name__)
@@ -60,6 +67,7 @@ class RoomMemberHandler(object):
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
+ self.identity_handler = hs.get_handlers().identity_handler
self.member_linearizer = Linearizer(name="member")
@@ -67,13 +75,10 @@ class RoomMemberHandler(object):
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.server_notices_mxid
+ self.rewrite_identity_server_urls = self.config.rewrite_identity_server_urls
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles
-
- # This is only used to get at ratelimit function, and
- # maybe_kick_guest_users. It's fine there are multiple of these as
- # it doesn't store state.
- self.base_handler = BaseHandler(hs)
+ self.ratelimiter = Ratelimiter()
@abc.abstractmethod
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -265,8 +270,31 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
+ new_room=False,
require_consent=True,
):
+ """Update a users membership in a room
+
+ Args:
+ requester (Requester)
+ target (UserID)
+ room_id (str)
+ action (str): The "action" the requester is performing against the
+ target. One of join/leave/kick/ban/invite/unban.
+ txn_id (str|None): The transaction ID associated with the request,
+ or None if not provided.
+ remote_room_hosts (list[str]|None): List of remote servers to try
+ and join via if this server isn't already in the room.
+ third_party_signed (dict|None): The signed object for third party
+ invites.
+ ratelimit (bool): Whether to apply ratelimiting to this request.
+ content (dict|None): Fields to include in the new event's content.
+ new_room (bool): Whether these membership changes are happening
+ as part of a room creation (e.g. initial joins and invites)
+
+ Returns:
+ Deferred[FrozenEvent]
+ """
key = (room_id,)
with (yield self.member_linearizer.queue(key)):
@@ -280,6 +308,7 @@ class RoomMemberHandler(object):
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
+ new_room=new_room,
require_consent=require_consent,
)
@@ -297,6 +326,7 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
+ new_room=False,
require_consent=True,
):
content_specified = bool(content)
@@ -361,8 +391,15 @@ class RoomMemberHandler(object):
)
block_invite = True
+ is_published = yield self.store.is_room_published(room_id)
+
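+ # Whether the room is published in the public rooms directory is
+ # passed through so the spam checker can apply different invite
+ # policies to published and unpublished rooms.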
if not self.spam_checker.user_may_invite(
- requester.user.to_string(), target.to_string(), room_id
+ requester.user.to_string(),
+ target.to_string(),
+ third_party_invite=None,
+ room_id=room_id,
+ new_room=new_room,
+ published_room=is_published,
):
logger.info("Blocking invite due to spam checker")
block_invite = True
@@ -434,8 +471,26 @@ class RoomMemberHandler(object):
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
+ if (
+ self._server_notices_mxid is not None
+ and requester.user.to_string() == self._server_notices_mxid
+ ):
+ # allow the server notices mxid to join rooms
+ is_requester_admin = True
+
+ else:
+ is_requester_admin = yield self.auth.is_server_admin(requester.user)
+
+ inviter = yield self._get_inviter(target.to_string(), room_id)
+ if not is_requester_admin:
+ # We assume that if the spam checker allowed the user to create
+ # a room then they're allowed to join it.
+ if not new_room and not self.spam_checker.user_may_join_room(
+ target.to_string(), room_id, is_invited=inviter is not None
+ ):
+ raise SynapseError(403, "Not allowed to join this room")
+
if not is_host_in_room:
- inviter = yield self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
@@ -706,6 +761,7 @@ class RoomMemberHandler(object):
id_server,
requester,
txn_id,
+ new_room=False,
id_access_token=None,
):
if self.config.block_non_admin_invites:
@@ -717,7 +773,23 @@ class RoomMemberHandler(object):
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
- yield self.base_handler.ratelimit(requester)
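+ # The dedicated ratelimiter raises LimitExceededError when the
+ # requester goes over rc_third_party_invite, so the invite is
+ # rejected before any 3PID lookup happens.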
+ self.ratelimiter.ratelimit(
+ requester.user.to_string(),
+ time_now_s=self.hs.clock.time(),
+ rate_hz=self.hs.config.rc_third_party_invite.per_second,
+ burst_count=self.hs.config.rc_third_party_invite.burst_count,
+ update=True,
+ )
+
can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
@@ -738,6 +810,19 @@ class RoomMemberHandler(object):
id_server, medium, address, id_access_token
)
+ is_published = yield self.store.is_room_published(room_id)
+
+ if not self.spam_checker.user_may_invite(
+ requester.user.to_string(),
+ invitee,
+ third_party_invite={"medium": medium, "address": address},
+ room_id=room_id,
+ new_room=new_room,
+ published_room=is_published,
+ ):
+ logger.info("Blocking invite due to spam checker")
+ raise SynapseError(403, "Invites have been disabled on this server")
+
if invitee:
yield self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
@@ -944,8 +1029,10 @@ class RoomMemberMasterHandler(RoomMemberHandler):
# join dance for now, since we're kinda implicitly checking
# that we are allowed to join when we decide whether or not we
# need to do the invite/join dance.
- yield self.federation_handler.do_invite_join(
- remote_room_hosts, room_id, user.to_string(), content
+ yield defer.ensureDeferred(
+ self.federation_handler.do_invite_join(
+ remote_room_hosts, room_id, user.to_string(), content
+ )
)
yield self._user_joined_room(user, room_id)
@@ -982,8 +1069,10 @@ class RoomMemberMasterHandler(RoomMemberHandler):
"""
fed_handler = self.federation_handler
try:
- ret = yield fed_handler.do_remotely_reject_invite(
- remote_room_hosts, room_id, target.to_string(), content=content,
+ ret = yield defer.ensureDeferred(
+ fed_handler.do_remotely_reject_invite(
+ remote_room_hosts, room_id, target.to_string(), content=content,
+ )
)
return ret
except Exception as e:
diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py
index d90c9e0108..3f50d6de47 100644
--- a/synapse/handlers/set_password.py
+++ b/synapse/handlers/set_password.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,12 +31,15 @@ class SetPasswordHandler(BaseHandler):
super(SetPasswordHandler, self).__init__(hs)
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
+ self._password_policy_handler = hs.get_password_policy_handler()
@defer.inlineCallbacks
def set_password(self, user_id, newpassword, requester=None):
if not self.hs.config.password_localdb_enabled:
raise SynapseError(403, "Password change disabled", errcode=Codes.FORBIDDEN)
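+ # Reject the new password up front if it does not satisfy the
+ # server's configured password policy.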
+ self._password_policy_handler.validate_password(newpassword)
+
password_hash = yield self._auth_handler.hash(newpassword)
except_device_id = requester.device_id if requester else None
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 68e6edace5..d93a276693 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler):
room_state["guest_access"] = event_content.get("guest_access")
for room_id, state in room_to_state_updates.items():
- logger.info("Updating room_stats_state for %s: %s", room_id, state)
+ logger.debug("Updating room_stats_state for %s: %s", room_id, state)
yield self.store.update_room_state(room_id, state)
return room_to_stats_deltas, user_to_stats_deltas
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 2b62fd83fd..4324bc702e 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -14,20 +14,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import collections
import itertools
import logging
+from typing import Any, Dict, FrozenSet, List, Optional, Set, Tuple
from six import iteritems, itervalues
+import attr
from prometheus_client import Counter
from synapse.api.constants import EventTypes, Membership
+from synapse.api.filtering import FilterCollection
+from synapse.events import EventBase
from synapse.logging.context import LoggingContext
from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.roommember import MemberSummary
from synapse.storage.state import StateFilter
-from synapse.types import RoomStreamToken
+from synapse.types import (
+ Collection,
+ JsonDict,
+ RoomStreamToken,
+ StateMap,
+ StreamToken,
+ UserID,
+)
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.lrucache import LruCache
@@ -62,17 +72,22 @@ LAZY_LOADED_MEMBERS_CACHE_MAX_AGE = 30 * 60 * 1000
LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE = 100
-SyncConfig = collections.namedtuple(
- "SyncConfig", ["user", "filter_collection", "is_guest", "request_key", "device_id"]
-)
+@attr.s(slots=True, frozen=True)
+class SyncConfig:
+ user = attr.ib(type=UserID)
+ filter_collection = attr.ib(type=FilterCollection)
+ is_guest = attr.ib(type=bool)
+ request_key = attr.ib(type=Tuple[Any, ...])
+ device_id = attr.ib(type=str)
-class TimelineBatch(
- collections.namedtuple("TimelineBatch", ["prev_batch", "events", "limited"])
-):
- __slots__ = []
+@attr.s(slots=True, frozen=True)
+class TimelineBatch:
+ prev_batch = attr.ib(type=StreamToken)
+ events = attr.ib(type=List[EventBase])
+ limited = attr.ib(type=bool)
- def __nonzero__(self):
+ def __nonzero__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if room needs to be part of the sync result.
"""
@@ -81,23 +96,17 @@ class TimelineBatch(
__bool__ = __nonzero__ # python3
-class JoinedSyncResult(
- collections.namedtuple(
- "JoinedSyncResult",
- [
- "room_id", # str
- "timeline", # TimelineBatch
- "state", # dict[(str, str), FrozenEvent]
- "ephemeral",
- "account_data",
- "unread_notifications",
- "summary",
- ],
- )
-):
- __slots__ = []
-
- def __nonzero__(self):
+@attr.s(slots=True, frozen=True)
+class JoinedSyncResult:
+ room_id = attr.ib(type=str)
+ timeline = attr.ib(type=TimelineBatch)
+ state = attr.ib(type=StateMap[EventBase])
+ ephemeral = attr.ib(type=List[JsonDict])
+ account_data = attr.ib(type=List[JsonDict])
+ unread_notifications = attr.ib(type=JsonDict)
+ summary = attr.ib(type=Optional[JsonDict])
+
+ def __nonzero__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if room needs to be part of the sync result.
"""
@@ -113,20 +122,14 @@ class JoinedSyncResult(
__bool__ = __nonzero__ # python3
-class ArchivedSyncResult(
- collections.namedtuple(
- "ArchivedSyncResult",
- [
- "room_id", # str
- "timeline", # TimelineBatch
- "state", # dict[(str, str), FrozenEvent]
- "account_data",
- ],
- )
-):
- __slots__ = []
-
- def __nonzero__(self):
+@attr.s(slots=True, frozen=True)
+class ArchivedSyncResult:
+ room_id = attr.ib(type=str)
+ timeline = attr.ib(type=TimelineBatch)
+ state = attr.ib(type=StateMap[EventBase])
+ account_data = attr.ib(type=List[JsonDict])
+
+ def __nonzero__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if room needs to be part of the sync result.
"""
@@ -135,70 +138,88 @@ class ArchivedSyncResult(
__bool__ = __nonzero__ # python3
-class InvitedSyncResult(
- collections.namedtuple(
- "InvitedSyncResult",
- ["room_id", "invite"], # str # FrozenEvent: the invite event
- )
-):
- __slots__ = []
+@attr.s(slots=True, frozen=True)
+class InvitedSyncResult:
+ room_id = attr.ib(type=str)
+ invite = attr.ib(type=EventBase)
- def __nonzero__(self):
+ def __nonzero__(self) -> bool:
"""Invited rooms should always be reported to the client"""
return True
__bool__ = __nonzero__ # python3
-class GroupsSyncResult(
- collections.namedtuple("GroupsSyncResult", ["join", "invite", "leave"])
-):
- __slots__ = []
+@attr.s(slots=True, frozen=True)
+class GroupsSyncResult:
+ join = attr.ib(type=JsonDict)
+ invite = attr.ib(type=JsonDict)
+ leave = attr.ib(type=JsonDict)
- def __nonzero__(self):
+ def __nonzero__(self) -> bool:
return bool(self.join or self.invite or self.leave)
__bool__ = __nonzero__ # python3
-class DeviceLists(
- collections.namedtuple(
- "DeviceLists",
- [
- "changed", # list of user_ids whose devices may have changed
- "left", # list of user_ids whose devices we no longer track
- ],
- )
-):
- __slots__ = []
+@attr.s(slots=True, frozen=True)
+class DeviceLists:
+ """
+ Attributes:
+ changed: List of user_ids whose devices may have changed
+ left: List of user_ids whose devices we no longer track
+ """
+
+ changed = attr.ib(type=Collection[str])
+ left = attr.ib(type=Collection[str])
- def __nonzero__(self):
+ def __nonzero__(self) -> bool:
return bool(self.changed or self.left)
__bool__ = __nonzero__ # python3
-class SyncResult(
- collections.namedtuple(
- "SyncResult",
- [
- "next_batch", # Token for the next sync
- "presence", # List of presence events for the user.
- "account_data", # List of account_data events for the user.
- "joined", # JoinedSyncResult for each joined room.
- "invited", # InvitedSyncResult for each invited room.
- "archived", # ArchivedSyncResult for each archived room.
- "to_device", # List of direct messages for the device.
- "device_lists", # List of user_ids whose devices have changed
- "device_one_time_keys_count", # Dict of algorithm to count for one time keys
- # for this device
- "groups",
- ],
- )
-):
- __slots__ = []
-
- def __nonzero__(self):
+@attr.s
+class _RoomChanges:
+ """The set of room entries to include in the sync, plus the set of joined
+ and left room IDs since last sync.
+ """
+
+ room_entries = attr.ib(type=List["RoomSyncResultBuilder"])
+ invited = attr.ib(type=List[InvitedSyncResult])
+ newly_joined_rooms = attr.ib(type=List[str])
+ newly_left_rooms = attr.ib(type=List[str])
+
+
+@attr.s(slots=True, frozen=True)
+class SyncResult:
+ """
+ Attributes:
+ next_batch: Token for the next sync
+ presence: List of presence events for the user.
+ account_data: List of account_data events for the user.
+ joined: JoinedSyncResult for each joined room.
+ invited: InvitedSyncResult for each invited room.
+ archived: ArchivedSyncResult for each archived room.
+ to_device: List of direct messages for the device.
+ device_lists: List of user_ids whose devices have changed
+ device_one_time_keys_count: Dict of algorithm to count for one time keys
+ for this device
+ groups: Group updates, if any
+ """
+
+ next_batch = attr.ib(type=StreamToken)
+ presence = attr.ib(type=List[JsonDict])
+ account_data = attr.ib(type=List[JsonDict])
+ joined = attr.ib(type=List[JoinedSyncResult])
+ invited = attr.ib(type=List[InvitedSyncResult])
+ archived = attr.ib(type=List[ArchivedSyncResult])
+ to_device = attr.ib(type=List[JsonDict])
+ device_lists = attr.ib(type=DeviceLists)
+ device_one_time_keys_count = attr.ib(type=JsonDict)
+ groups = attr.ib(type=Optional[GroupsSyncResult])
+
+ def __nonzero__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
to tell if the notifier needs to wait for more events when polling for
events.
@@ -240,13 +261,15 @@ class SyncHandler(object):
)
async def wait_for_sync_for_user(
- self, sync_config, since_token=None, timeout=0, full_state=False
- ):
+ self,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken] = None,
+ timeout: int = 0,
+ full_state: bool = False,
+ ) -> SyncResult:
"""Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
- Returns:
- Deferred[SyncResult]
"""
# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
@@ -265,8 +288,12 @@ class SyncHandler(object):
return res
async def _wait_for_sync_for_user(
- self, sync_config, since_token, timeout, full_state
- ):
+ self,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken] = None,
+ timeout: int = 0,
+ full_state: bool = False,
+ ) -> SyncResult:
if since_token is None:
sync_type = "initial_sync"
elif full_state:
@@ -305,25 +332,33 @@ class SyncHandler(object):
return result
- def current_sync_for_user(self, sync_config, since_token=None, full_state=False):
+ async def current_sync_for_user(
+ self,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken] = None,
+ full_state: bool = False,
+ ) -> SyncResult:
"""Get the sync for client needed to match what the server has now.
- Returns:
- A Deferred SyncResult.
"""
- return self.generate_sync_result(sync_config, since_token, full_state)
+ return await self.generate_sync_result(sync_config, since_token, full_state)
- async def push_rules_for_user(self, user):
+ async def push_rules_for_user(self, user: UserID) -> JsonDict:
user_id = user.to_string()
rules = await self.store.get_push_rules_for_user(user_id)
rules = format_push_rules_for_user(user, rules)
return rules
- async def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None):
+ async def ephemeral_by_room(
+ self,
+ sync_result_builder: "SyncResultBuilder",
+ now_token: StreamToken,
+ since_token: Optional[StreamToken] = None,
+ ) -> Tuple[StreamToken, Dict[str, List[JsonDict]]]:
"""Get the ephemeral events for each room the user is in
Args:
- sync_result_builder(SyncResultBuilder)
- now_token (StreamToken): Where the server is currently up to.
- since_token (StreamToken): Where the server was when the client
+ sync_result_builder
+ now_token: Where the server is currently up to.
+ since_token: Where the server was when the client
last synced.
Returns:
A tuple of the now StreamToken, updated to reflect which typing
@@ -348,7 +383,7 @@ class SyncHandler(object):
)
now_token = now_token.copy_and_replace("typing_key", typing_key)
- ephemeral_by_room = {}
+ ephemeral_by_room = {} # type: JsonDict
for event in typing:
# we want to exclude the room_id from the event, but modifying the
@@ -380,13 +415,13 @@ class SyncHandler(object):
async def _load_filtered_recents(
self,
- room_id,
- sync_config,
- now_token,
- since_token=None,
- recents=None,
- newly_joined_room=False,
- ):
+ room_id: str,
+ sync_config: SyncConfig,
+ now_token: StreamToken,
+ since_token: Optional[StreamToken] = None,
+ potential_recents: Optional[List[EventBase]] = None,
+ newly_joined_room: bool = False,
+ ) -> TimelineBatch:
"""
Returns:
a Deferred TimelineBatch
@@ -397,21 +432,29 @@ class SyncHandler(object):
sync_config.filter_collection.blocks_all_room_timeline()
)
- if recents is None or newly_joined_room or timeline_limit < len(recents):
+ if (
+ potential_recents is None
+ or newly_joined_room
+ or timeline_limit < len(potential_recents)
+ ):
limited = True
else:
limited = False
- if recents:
- recents = sync_config.filter_collection.filter_room_timeline(recents)
+ if potential_recents:
+ recents = sync_config.filter_collection.filter_room_timeline(
+ potential_recents
+ )
# We check if there are any state events, if there are then we pass
# all current state events to the filter_events function. This is to
# ensure that we always include current state in the timeline
- current_state_ids = frozenset()
+ current_state_ids = frozenset() # type: FrozenSet[str]
if any(e.is_state() for e in recents):
- current_state_ids = await self.state.get_current_state_ids(room_id)
- current_state_ids = frozenset(itervalues(current_state_ids))
+ current_state_ids_map = await self.state.get_current_state_ids(
+ room_id
+ )
+ current_state_ids = frozenset(itervalues(current_state_ids_map))
recents = await filter_events_for_client(
self.storage,
@@ -463,8 +506,10 @@ class SyncHandler(object):
# ensure that we always include current state in the timeline
current_state_ids = frozenset()
if any(e.is_state() for e in loaded_recents):
- current_state_ids = await self.state.get_current_state_ids(room_id)
- current_state_ids = frozenset(itervalues(current_state_ids))
+ current_state_ids_map = await self.state.get_current_state_ids(
+ room_id
+ )
+ current_state_ids = frozenset(itervalues(current_state_ids_map))
loaded_recents = await filter_events_for_client(
self.storage,
@@ -493,17 +538,15 @@ class SyncHandler(object):
limited=limited or newly_joined_room,
)
- async def get_state_after_event(self, event, state_filter=StateFilter.all()):
+ async def get_state_after_event(
+ self, event: EventBase, state_filter: StateFilter = StateFilter.all()
+ ) -> StateMap[str]:
"""
Get the room state after the given event
Args:
- event(synapse.events.EventBase): event of interest
- state_filter (StateFilter): The state filter used to fetch state
- from the database.
-
- Returns:
- A Deferred map from ((type, state_key)->Event)
+ event: event of interest
+ state_filter: The state filter used to fetch state from the database.
"""
state_ids = await self.state_store.get_state_ids_for_event(
event.event_id, state_filter=state_filter
@@ -514,18 +557,17 @@ class SyncHandler(object):
return state_ids
async def get_state_at(
- self, room_id, stream_position, state_filter=StateFilter.all()
- ):
+ self,
+ room_id: str,
+ stream_position: StreamToken,
+ state_filter: StateFilter = StateFilter.all(),
+ ) -> StateMap[str]:
""" Get the room state at a particular stream position
Args:
- room_id(str): room for which to get state
- stream_position(StreamToken): point at which to get state
- state_filter (StateFilter): The state filter used to fetch state
- from the database.
-
- Returns:
- A Deferred map from ((type, state_key)->Event)
+ room_id: room for which to get state
+ stream_position: point at which to get state
+ state_filter: The state filter used to fetch state from the database.
"""
# FIXME this claims to get the state at a stream position, but
# get_recent_events_for_room operates by topo ordering. This therefore
@@ -546,23 +588,25 @@ class SyncHandler(object):
state = {}
return state
- async def compute_summary(self, room_id, sync_config, batch, state, now_token):
+ async def compute_summary(
+ self,
+ room_id: str,
+ sync_config: SyncConfig,
+ batch: TimelineBatch,
+ state: StateMap[EventBase],
+ now_token: StreamToken,
+ ) -> Optional[JsonDict]:
""" Works out a room summary block for this room, summarising the number
of joined members in the room, and providing the 'hero' members if the
room has no name so clients can consistently name rooms. Also adds
state events to 'state' if needed to describe the heroes.
- Args:
- room_id(str):
- sync_config(synapse.handlers.sync.SyncConfig):
- batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
- the room that will be sent to the user.
- state(dict): dict of (type, state_key) -> Event as returned by
- compute_state_delta
- now_token(str): Token of the end of the current batch.
-
- Returns:
- A deferred dict describing the room summary
+ Args:
+ room_id
+ sync_config
+ batch: The timeline batch for the room that will be sent to the user.
+ state: State as returned by compute_state_delta
+ now_token: Token of the end of the current batch.
"""
# FIXME: we could/should get this from room_stats when matthew/stats lands
@@ -681,7 +725,7 @@ class SyncHandler(object):
return summary
- def get_lazy_loaded_members_cache(self, cache_key):
+ def get_lazy_loaded_members_cache(self, cache_key: Tuple[str, str]) -> LruCache:
cache = self.lazy_loaded_members_cache.get(cache_key)
if cache is None:
logger.debug("creating LruCache for %r", cache_key)
@@ -692,23 +736,24 @@ class SyncHandler(object):
return cache
async def compute_state_delta(
- self, room_id, batch, sync_config, since_token, now_token, full_state
- ):
+ self,
+ room_id: str,
+ batch: TimelineBatch,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken],
+ now_token: StreamToken,
+ full_state: bool,
+ ) -> StateMap[EventBase]:
""" Works out the difference in state between the start of the timeline
and the previous sync.
Args:
- room_id(str):
- batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
- the room that will be sent to the user.
- sync_config(synapse.handlers.sync.SyncConfig):
- since_token(str|None): Token of the end of the previous batch. May
- be None.
- now_token(str): Token of the end of the current batch.
- full_state(bool): Whether to force returning the full state.
-
- Returns:
- A deferred dict of (type, state_key) -> Event
+ room_id:
+ batch: The timeline batch for the room that will be sent to the user.
+ sync_config:
+ since_token: Token of the end of the previous batch. May be None.
+ now_token: Token of the end of the current batch.
+ full_state: Whether to force returning the full state.
"""
# TODO(mjark) Check if the state events were received by the server
# after the previous sync, since we need to include those state
@@ -800,6 +845,10 @@ class SyncHandler(object):
# about them).
state_filter = StateFilter.all()
+ # If this is an initial sync then full_state should be set, and
+ # that case is handled above. We assert here to ensure that this
+ # is indeed the case.
+ assert since_token is not None
state_at_previous_sync = await self.get_state_at(
room_id, stream_position=since_token, state_filter=state_filter
)
@@ -874,7 +923,7 @@ class SyncHandler(object):
if t[0] == EventTypes.Member:
cache.set(t[1], event_id)
- state = {}
+ state = {} # type: Dict[str, EventBase]
if state_ids:
state = await self.store.get_events(list(state_ids.values()))
@@ -886,7 +935,9 @@ class SyncHandler(object):
if e.type != EventTypes.Aliases # until MSC2261 or alternative solution
}
- async def unread_notifs_for_room_id(self, room_id, sync_config):
+ async def unread_notifs_for_room_id(
+ self, room_id: str, sync_config: SyncConfig
+ ) -> Optional[Dict[str, str]]:
with Measure(self.clock, "unread_notifs_for_room_id"):
last_unread_event_id = await self.store.get_last_receipt_event_id_for_user(
user_id=sync_config.user.to_string(),
@@ -894,7 +945,6 @@ class SyncHandler(object):
receipt_type="m.read",
)
- notifs = []
if last_unread_event_id:
notifs = await self.store.get_unread_event_push_actions_by_room_for_user(
room_id, sync_config.user.to_string(), last_unread_event_id
@@ -906,17 +956,12 @@ class SyncHandler(object):
return None
async def generate_sync_result(
- self, sync_config, since_token=None, full_state=False
- ):
+ self,
+ sync_config: SyncConfig,
+ since_token: Optional[StreamToken] = None,
+ full_state: bool = False,
+ ) -> SyncResult:
"""Generates a sync result.
-
- Args:
- sync_config (SyncConfig)
- since_token (StreamToken)
- full_state (bool)
-
- Returns:
- Deferred(SyncResult)
"""
# NB: The now_token gets changed by some of the generate_sync_* methods,
# this is due to some of the underlying streams not supporting the ability
@@ -924,7 +969,7 @@ class SyncHandler(object):
# Always use the `now_token` in `SyncResultBuilder`
now_token = await self.event_sources.get_current_token()
- logger.info(
+ logger.debug(
"Calculating sync response for %r between %s and %s",
sync_config.user,
since_token,
@@ -978,7 +1023,7 @@ class SyncHandler(object):
)
device_id = sync_config.device_id
- one_time_key_counts = {}
+ one_time_key_counts = {} # type: JsonDict
if device_id:
one_time_key_counts = await self.store.count_e2e_one_time_keys(
user_id, device_id
@@ -1008,7 +1053,9 @@ class SyncHandler(object):
)
@measure_func("_generate_sync_entry_for_groups")
- async def _generate_sync_entry_for_groups(self, sync_result_builder):
+ async def _generate_sync_entry_for_groups(
+ self, sync_result_builder: "SyncResultBuilder"
+ ) -> None:
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
now_token = sync_result_builder.now_token
@@ -1053,27 +1100,22 @@ class SyncHandler(object):
@measure_func("_generate_sync_entry_for_device_list")
async def _generate_sync_entry_for_device_list(
self,
- sync_result_builder,
- newly_joined_rooms,
- newly_joined_or_invited_users,
- newly_left_rooms,
- newly_left_users,
- ):
+ sync_result_builder: "SyncResultBuilder",
+ newly_joined_rooms: Set[str],
+ newly_joined_or_invited_users: Set[str],
+ newly_left_rooms: Set[str],
+ newly_left_users: Set[str],
+ ) -> DeviceLists:
"""Generate the DeviceLists section of sync
Args:
- sync_result_builder (SyncResultBuilder)
- newly_joined_rooms (set[str]): Set of rooms user has joined since
- previous sync
- newly_joined_or_invited_users (set[str]): Set of users that have
- joined or been invited to a room since previous sync.
- newly_left_rooms (set[str]): Set of rooms user has left since
+ sync_result_builder
+ newly_joined_rooms: Set of rooms user has joined since previous sync
+ newly_joined_or_invited_users: Set of users that have joined or
+ been invited to a room since previous sync.
+ newly_left_rooms: Set of rooms user has left since previous sync
+ newly_left_users: Set of users that have left a room we're in since
previous sync
- newly_left_users (set[str]): Set of users that have left a room
- we're in since previous sync
-
- Returns:
- Deferred[DeviceLists]
"""
user_id = sync_result_builder.sync_config.user.to_string()
@@ -1134,15 +1176,11 @@ class SyncHandler(object):
else:
return DeviceLists(changed=[], left=[])
- async def _generate_sync_entry_for_to_device(self, sync_result_builder):
+ async def _generate_sync_entry_for_to_device(
+ self, sync_result_builder: "SyncResultBuilder"
+ ) -> None:
"""Generates the portion of the sync response. Populates
`sync_result_builder` with the result.
-
- Args:
- sync_result_builder(SyncResultBuilder)
-
- Returns:
- Deferred(dict): A dictionary containing the per room account data.
"""
user_id = sync_result_builder.sync_config.user.to_string()
device_id = sync_result_builder.sync_config.device_id
@@ -1180,15 +1218,17 @@ class SyncHandler(object):
else:
sync_result_builder.to_device = []
- async def _generate_sync_entry_for_account_data(self, sync_result_builder):
+ async def _generate_sync_entry_for_account_data(
+ self, sync_result_builder: "SyncResultBuilder"
+ ) -> Dict[str, Dict[str, JsonDict]]:
"""Generates the account data portion of the sync response. Populates
`sync_result_builder` with the result.
Args:
- sync_result_builder(SyncResultBuilder)
+ sync_result_builder
Returns:
- Deferred(dict): A dictionary containing the per room account data.
+ A dictionary containing the per room account data.
"""
sync_config = sync_result_builder.sync_config
user_id = sync_result_builder.sync_config.user.to_string()
@@ -1232,18 +1272,21 @@ class SyncHandler(object):
return account_data_by_room
async def _generate_sync_entry_for_presence(
- self, sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users
- ):
+ self,
+ sync_result_builder: "SyncResultBuilder",
+ newly_joined_rooms: Set[str],
+ newly_joined_or_invited_users: Set[str],
+ ) -> None:
"""Generates the presence portion of the sync response. Populates the
`sync_result_builder` with the result.
Args:
- sync_result_builder(SyncResultBuilder)
- newly_joined_rooms(list): List of rooms that the user has joined
- since the last sync (or empty if an initial sync)
- newly_joined_or_invited_users(list): List of users that have joined
- or been invited to rooms since the last sync (or empty if an initial
- sync)
+ sync_result_builder
+ newly_joined_rooms: Set of rooms that the user has joined since
+ the last sync (or empty if an initial sync)
+ newly_joined_or_invited_users: Set of users that have joined or
+ been invited to rooms since the last sync (or empty if an
+ initial sync)
"""
now_token = sync_result_builder.now_token
sync_config = sync_result_builder.sync_config
@@ -1287,17 +1330,19 @@ class SyncHandler(object):
sync_result_builder.presence = presence
async def _generate_sync_entry_for_rooms(
- self, sync_result_builder, account_data_by_room
- ):
+ self,
+ sync_result_builder: "SyncResultBuilder",
+ account_data_by_room: Dict[str, Dict[str, JsonDict]],
+ ) -> Tuple[Set[str], Set[str], Set[str], Set[str]]:
"""Generates the rooms portion of the sync response. Populates the
`sync_result_builder` with the result.
Args:
- sync_result_builder(SyncResultBuilder)
- account_data_by_room(dict): Dictionary of per room account data
+ sync_result_builder
+ account_data_by_room: Dictionary of per room account data
Returns:
- Deferred(tuple): Returns a 4-tuple of
+ Returns a 4-tuple of
`(newly_joined_rooms, newly_joined_or_invited_users,
newly_left_rooms, newly_left_users)`
"""
@@ -1308,7 +1353,7 @@ class SyncHandler(object):
)
if block_all_room_ephemeral:
- ephemeral_by_room = {}
+ ephemeral_by_room = {} # type: Dict[str, List[JsonDict]]
else:
now_token, ephemeral_by_room = await self.ephemeral_by_room(
sync_result_builder,
@@ -1329,7 +1374,7 @@ class SyncHandler(object):
)
if not tags_by_room:
logger.debug("no-oping sync")
- return [], [], [], []
+ return set(), set(), set(), set()
ignored_account_data = await self.store.get_global_account_data_by_type_for_user(
"m.ignored_user_list", user_id=user_id
@@ -1341,19 +1386,22 @@ class SyncHandler(object):
ignored_users = frozenset()
if since_token:
- res = await self._get_rooms_changed(sync_result_builder, ignored_users)
- room_entries, invited, newly_joined_rooms, newly_left_rooms = res
-
+ room_changes = await self._get_rooms_changed(
+ sync_result_builder, ignored_users
+ )
tags_by_room = await self.store.get_updated_tags(
user_id, since_token.account_data_key
)
else:
- res = await self._get_all_rooms(sync_result_builder, ignored_users)
- room_entries, invited, newly_joined_rooms = res
- newly_left_rooms = []
+ room_changes = await self._get_all_rooms(sync_result_builder, ignored_users)
tags_by_room = await self.store.get_tags_for_user(user_id)
+ room_entries = room_changes.room_entries
+ invited = room_changes.invited
+ newly_joined_rooms = room_changes.newly_joined_rooms
+ newly_left_rooms = room_changes.newly_left_rooms
+
def handle_room_entries(room_entry):
return self._generate_room_entry(
sync_result_builder,
@@ -1393,13 +1441,15 @@ class SyncHandler(object):
newly_left_users -= newly_joined_or_invited_users
return (
- newly_joined_rooms,
+ set(newly_joined_rooms),
newly_joined_or_invited_users,
- newly_left_rooms,
+ set(newly_left_rooms),
newly_left_users,
)
- async def _have_rooms_changed(self, sync_result_builder):
+ async def _have_rooms_changed(
+ self, sync_result_builder: "SyncResultBuilder"
+ ) -> bool:
"""Returns whether there may be any new events that should be sent down
the sync. Returns True if there are.
"""
@@ -1423,22 +1473,10 @@ class SyncHandler(object):
return True
return False
- async def _get_rooms_changed(self, sync_result_builder, ignored_users):
+ async def _get_rooms_changed(
+ self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str]
+ ) -> _RoomChanges:
"""Gets the the changes that have happened since the last sync.
-
- Args:
- sync_result_builder(SyncResultBuilder)
- ignored_users(set(str)): Set of users ignored by user.
-
- Returns:
- Deferred(tuple): Returns a tuple of the form:
- `(room_entries, invited_rooms, newly_joined_rooms, newly_left_rooms)`
-
- where:
- room_entries is a list [RoomSyncResultBuilder]
- invited_rooms is a list [InvitedSyncResult]
- newly_joined_rooms is a list[str] of room ids
- newly_left_rooms is a list[str] of room ids
"""
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
@@ -1452,7 +1490,7 @@ class SyncHandler(object):
user_id, since_token.room_key, now_token.room_key
)
- mem_change_events_by_room_id = {}
+ mem_change_events_by_room_id = {} # type: Dict[str, List[EventBase]]
for event in rooms_changed:
mem_change_events_by_room_id.setdefault(event.room_id, []).append(event)
@@ -1461,7 +1499,7 @@ class SyncHandler(object):
room_entries = []
invited = []
for room_id, events in iteritems(mem_change_events_by_room_id):
- logger.info(
+ logger.debug(
"Membership changes in %s: [%s]",
room_id,
", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
@@ -1571,7 +1609,7 @@ class SyncHandler(object):
# This is all screaming out for a refactor, as the logic here is
# subtle and the moving parts numerous.
if leave_event.internal_metadata.is_out_of_band_membership():
- batch_events = [leave_event]
+ batch_events = [leave_event] # type: Optional[List[EventBase]]
else:
batch_events = None
@@ -1637,18 +1675,17 @@ class SyncHandler(object):
)
room_entries.append(entry)
- return room_entries, invited, newly_joined_rooms, newly_left_rooms
+ return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms)
- async def _get_all_rooms(self, sync_result_builder, ignored_users):
+ async def _get_all_rooms(
+ self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str]
+ ) -> _RoomChanges:
"""Returns entries for all rooms for the user.
Args:
- sync_result_builder(SyncResultBuilder)
- ignored_users(set(str)): Set of users ignored by user.
+ sync_result_builder
+ ignored_users: Set of users ignored by user.
- Returns:
- Deferred(tuple): Returns a tuple of the form:
- `([RoomSyncResultBuilder], [InvitedSyncResult], [])`
"""
user_id = sync_result_builder.sync_config.user.to_string()
@@ -1710,30 +1747,30 @@ class SyncHandler(object):
)
)
- return room_entries, invited, []
+ return _RoomChanges(room_entries, invited, [], [])
async def _generate_room_entry(
self,
- sync_result_builder,
- ignored_users,
- room_builder,
- ephemeral,
- tags,
- account_data,
- always_include=False,
+ sync_result_builder: "SyncResultBuilder",
+ ignored_users: Set[str],
+ room_builder: "RoomSyncResultBuilder",
+ ephemeral: List[JsonDict],
+ tags: Optional[List[JsonDict]],
+ account_data: Dict[str, JsonDict],
+ always_include: bool = False,
):
"""Populates the `joined` and `archived` section of `sync_result_builder`
based on the `room_builder`.
Args:
- sync_result_builder(SyncResultBuilder)
- ignored_users(set(str)): Set of users ignored by user.
- room_builder(RoomSyncResultBuilder)
- ephemeral(list): List of new ephemeral events for room
- tags(list): List of *all* tags for room, or None if there has been
+ sync_result_builder
+ ignored_users: Set of users ignored by user.
+ room_builder
+ ephemeral: List of new ephemeral events for room
+ tags: List of *all* tags for room, or None if there has been
no change.
- account_data(list): List of new account data for room
- always_include(bool): Always include this room in the sync response,
+ account_data: List of new account data for room
+ always_include: Always include this room in the sync response,
even if empty.
"""
newly_joined = room_builder.newly_joined
@@ -1759,7 +1796,7 @@ class SyncHandler(object):
sync_config,
now_token=upto_token,
since_token=since_token,
- recents=events,
+ potential_recents=events,
newly_joined_room=newly_joined,
)
@@ -1810,7 +1847,7 @@ class SyncHandler(object):
room_id, batch, sync_config, since_token, now_token, full_state=full_state
)
- summary = {}
+ summary = {} # type: Optional[JsonDict]
# we include a summary in room responses when we're lazy loading
# members (as the client otherwise doesn't have enough info to form
@@ -1834,7 +1871,7 @@ class SyncHandler(object):
)
if room_builder.rtype == "joined":
- unread_notifications = {}
+ unread_notifications = {} # type: Dict[str, str]
room_sync = JoinedSyncResult(
room_id=room_id,
timeline=batch,
@@ -1856,23 +1893,25 @@ class SyncHandler(object):
if batch.limited and since_token:
user_id = sync_result_builder.sync_config.user.to_string()
- logger.info(
+ logger.debug(
"Incremental gappy sync of %s for user %s with %d state events"
% (room_id, user_id, len(state))
)
elif room_builder.rtype == "archived":
- room_sync = ArchivedSyncResult(
+ archived_room_sync = ArchivedSyncResult(
room_id=room_id,
timeline=batch,
state=state,
account_data=account_data_events,
)
- if room_sync or always_include:
- sync_result_builder.archived.append(room_sync)
+ if archived_room_sync or always_include:
+ sync_result_builder.archived.append(archived_room_sync)
else:
raise Exception("Unrecognized rtype: %r", room_builder.rtype)
- async def get_rooms_for_user_at(self, user_id, stream_ordering):
+ async def get_rooms_for_user_at(
+ self, user_id: str, stream_ordering: int
+ ) -> FrozenSet[str]:
"""Get set of joined rooms for a user at the given stream ordering.
The stream ordering *must* be recent, otherwise this may throw an
@@ -1880,12 +1919,11 @@ class SyncHandler(object):
current token, which should be perfectly fine).
Args:
- user_id (str)
- stream_ordering (int)
+ user_id
+ stream_ordering
ReturnValue:
- Deferred[frozenset[str]]: Set of room_ids the user is in at given
- stream_ordering.
+ Set of room_ids the user is in at given stream_ordering.
"""
joined_rooms = await self.store.get_rooms_for_user_with_stream_ordering(user_id)
@@ -1912,11 +1950,10 @@ class SyncHandler(object):
if user_id in users_in_room:
joined_room_ids.add(room_id)
- joined_room_ids = frozenset(joined_room_ids)
- return joined_room_ids
+ return frozenset(joined_room_ids)
-def _action_has_highlight(actions):
+def _action_has_highlight(actions: List[JsonDict]) -> bool:
for action in actions:
try:
if action.get("set_tweak", None) == "highlight":
@@ -1928,22 +1965,23 @@ def _action_has_highlight(actions):
def _calculate_state(
- timeline_contains, timeline_start, previous, current, lazy_load_members
-):
+ timeline_contains: StateMap[str],
+ timeline_start: StateMap[str],
+ previous: StateMap[str],
+ current: StateMap[str],
+ lazy_load_members: bool,
+) -> StateMap[str]:
"""Works out what state to include in a sync response.
Args:
- timeline_contains (dict): state in the timeline
- timeline_start (dict): state at the start of the timeline
- previous (dict): state at the end of the previous sync (or empty dict
+ timeline_contains: state in the timeline
+ timeline_start: state at the start of the timeline
+ previous: state at the end of the previous sync (or empty dict
if this is an initial sync)
- current (dict): state at the end of the timeline
- lazy_load_members (bool): whether to return members from timeline_start
+ current: state at the end of the timeline
+ lazy_load_members: whether to return members from timeline_start
or not. assumes that timeline_start has already been filtered to
include only the members the client needs to know about.
-
- Returns:
- dict
"""
event_id_to_key = {
e: key
@@ -1980,15 +2018,16 @@ def _calculate_state(
return {event_id_to_key[e]: e for e in state_ids}
-class SyncResultBuilder(object):
+@attr.s
+class SyncResultBuilder:
"""Used to help build up a new SyncResult for a user
Attributes:
- sync_config (SyncConfig)
- full_state (bool)
- since_token (StreamToken)
- now_token (StreamToken)
- joined_room_ids (list[str])
+ sync_config
+ full_state: The full_state flag as specified by user
+ since_token: The token supplied by user, or None.
+ now_token: The token to sync up to.
+ joined_room_ids: List of rooms the user is joined to
# The following mirror the fields in a sync response
presence (list)
@@ -1996,61 +2035,45 @@ class SyncResultBuilder(object):
joined (list[JoinedSyncResult])
invited (list[InvitedSyncResult])
archived (list[ArchivedSyncResult])
- device (list)
groups (GroupsSyncResult|None)
to_device (list)
"""
- def __init__(
- self, sync_config, full_state, since_token, now_token, joined_room_ids
- ):
- """
- Args:
- sync_config (SyncConfig)
- full_state (bool): The full_state flag as specified by user
- since_token (StreamToken): The token supplied by user, or None.
- now_token (StreamToken): The token to sync up to.
- joined_room_ids (list[str]): List of rooms the user is joined to
- """
- self.sync_config = sync_config
- self.full_state = full_state
- self.since_token = since_token
- self.now_token = now_token
- self.joined_room_ids = joined_room_ids
-
- self.presence = []
- self.account_data = []
- self.joined = []
- self.invited = []
- self.archived = []
- self.device = []
- self.groups = None
- self.to_device = []
+ sync_config = attr.ib(type=SyncConfig)
+ full_state = attr.ib(type=bool)
+ since_token = attr.ib(type=Optional[StreamToken])
+ now_token = attr.ib(type=StreamToken)
+ joined_room_ids = attr.ib(type=FrozenSet[str])
+
+ presence = attr.ib(type=List[JsonDict], default=attr.Factory(list))
+ account_data = attr.ib(type=List[JsonDict], default=attr.Factory(list))
+ joined = attr.ib(type=List[JoinedSyncResult], default=attr.Factory(list))
+ invited = attr.ib(type=List[InvitedSyncResult], default=attr.Factory(list))
+ archived = attr.ib(type=List[ArchivedSyncResult], default=attr.Factory(list))
+ groups = attr.ib(type=Optional[GroupsSyncResult], default=None)
+ to_device = attr.ib(type=List[JsonDict], default=attr.Factory(list))
+@attr.s
class RoomSyncResultBuilder(object):
"""Stores information needed to create either a `JoinedSyncResult` or
`ArchivedSyncResult`.
+
+ Attributes:
+ room_id
+ rtype: One of `"joined"` or `"archived"`
+ events: List of events to include in the room (more events may be added
+ when generating result).
+ newly_joined: If the user has newly joined the room
+ full_state: Whether the full state should be sent in result
+ since_token: Earliest point to return events from, or None
+ upto_token: Latest point to return events from.
"""
- def __init__(
- self, room_id, rtype, events, newly_joined, full_state, since_token, upto_token
- ):
- """
- Args:
- room_id(str)
- rtype(str): One of `"joined"` or `"archived"`
- events(list[FrozenEvent]): List of events to include in the room
- (more events may be added when generating result).
- newly_joined(bool): If the user has newly joined the room
- full_state(bool): Whether the full state should be sent in result
- since_token(StreamToken): Earliest point to return events from, or None
- upto_token(StreamToken): Latest point to return events from.
- """
- self.room_id = room_id
- self.rtype = rtype
- self.events = events
- self.newly_joined = newly_joined
- self.full_state = full_state
- self.since_token = since_token
- self.upto_token = upto_token
+ room_id = attr.ib(type=str)
+ rtype = attr.ib(type=str)
+ events = attr.ib(type=Optional[List[EventBase]])
+ newly_joined = attr.ib(type=bool)
+ full_state = attr.ib(type=bool)
+ since_token = attr.ib(type=Optional[StreamToken])
+ upto_token = attr.ib(type=StreamToken)
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 624f05ab5b..81aa58dc8c 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -149,7 +149,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.pos, room_max_stream_ordering
)
- logger.info("Handling %d state deltas", len(deltas))
+ logger.debug("Handling %d state deltas", len(deltas))
yield self._handle_deltas(deltas)
self.pos = max_pos
@@ -195,7 +195,7 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id, self.server_name
)
if not is_in_room:
- logger.info("Server left room: %r", room_id)
+ logger.debug("Server left room: %r", room_id)
# Fetch all the users that we marked as being in user
# directory due to being in the room and then check if
# need to remove those users or not
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 911251c0bc..e092193c9c 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -225,7 +225,7 @@ class SynapseRequest(Request):
self.start_time, name=servlet_name, method=self.get_method()
)
- self.site.access_logger.info(
+ self.site.access_logger.debug(
"%s - %s - Received request: %s %s",
self.getClientIP(),
self.site.site_tag,
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index d0879b0490..5bb17d1228 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -398,7 +398,7 @@ class HttpPusher(object):
Args:
badge (int): number of unread messages
"""
- logger.info("Sending updated badge count %d to %s", badge, self.name)
+ logger.debug("Sending updated badge count %d to %s", badge, self.name)
d = {
"notification": {
"id": "",
diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py
index 69a4ae42f9..2d4fd08cf5 100644
--- a/synapse/replication/slave/storage/groups.py
+++ b/synapse/replication/slave/storage/groups.py
@@ -13,15 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.storage import DataStore
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
+from synapse.storage.data_stores.main.group_server import GroupServerWorkerStore
from synapse.storage.database import Database
from synapse.util.caches.stream_change_cache import StreamChangeCache
-from ._base import BaseSlavedStore, __func__
-from ._slaved_id_tracker import SlavedIdTracker
-
-class SlavedGroupServerStore(BaseSlavedStore):
+class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore):
def __init__(self, database: Database, db_conn, hs):
super(SlavedGroupServerStore, self).__init__(database, db_conn, hs)
@@ -35,9 +34,8 @@ class SlavedGroupServerStore(BaseSlavedStore):
self._group_updates_id_gen.get_current_token(),
)
- get_groups_changes_for_user = __func__(DataStore.get_groups_changes_for_user)
- get_group_stream_token = __func__(DataStore.get_group_stream_token)
- get_all_groups_for_user = __func__(DataStore.get_all_groups_for_user)
+ def get_group_stream_token(self):
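+ # Workers read the group-updates position from the slaved ID tracker
+ # rather than generating new stream IDs themselves.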
+ return self._group_updates_id_gen.get_current_token()
def stream_positions(self):
result = super(SlavedGroupServerStore, self).stream_positions()
diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py
index 4a1fc2ec2b..14eca70ba4 100644
--- a/synapse/rest/__init__.py
+++ b/synapse/rest/__init__.py
@@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha import (
keys,
notifications,
openid,
+ password_policy,
read_marker,
receipts,
register,
@@ -117,6 +118,7 @@ class ClientRestResource(JsonResource):
room_upgrade_rest_servlet.register_servlets(hs, client_resource)
capabilities.register_servlets(hs, client_resource)
account_validity.register_servlets(hs, client_resource)
+ password_policy.register_servlets(hs, client_resource)
relations.register_servlets(hs, client_resource)
# moving to /_synapse/admin
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 3455741195..e75c5f1370 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -105,7 +105,7 @@ class UsersRestServletV2(RestServlet):
class UserRestServletV2(RestServlet):
- PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P<user_id>@[^/]+)$"),)
+ PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P<user_id>[^/]+)$"),)
"""Get request to list user details.
This needs user to have administrator access in Synapse.
@@ -136,6 +136,8 @@ class UserRestServletV2(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.admin_handler = hs.get_handlers().admin_handler
+ self.store = hs.get_datastore()
+ self.auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.set_password_handler = hs.get_set_password_handler()
self.deactivate_account_handler = hs.get_deactivate_account_handler()
@@ -163,6 +165,7 @@ class UserRestServletV2(RestServlet):
raise SynapseError(400, "This endpoint can only be used with local users")
user = await self.admin_handler.get_user(target_user)
+ user_id = target_user.to_string()
if user: # modify user
if "displayname" in body:
@@ -170,6 +173,29 @@ class UserRestServletV2(RestServlet):
target_user, requester, body["displayname"], True
)
+ if "threepids" in body:
+ # check for required parameters for each threepid
+ for threepid in body["threepids"]:
+ assert_params_in_dict(threepid, ["medium", "address"])
+
+ # remove old threepids from user
+ threepids = await self.store.user_get_threepids(user_id)
+ for threepid in threepids:
+ try:
+ await self.auth_handler.delete_threepid(
+ user_id, threepid["medium"], threepid["address"], None
+ )
+ except Exception:
+ logger.exception("Failed to remove threepids")
+ raise SynapseError(500, "Failed to remove threepids")
+
+ # add new threepids to user
+ current_time = self.hs.get_clock().time_msec()
+ for threepid in body["threepids"]:
+ await self.auth_handler.add_threepid(
+ user_id, threepid["medium"], threepid["address"], current_time
+ )
+
if "avatar_url" in body:
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
@@ -221,6 +247,7 @@ class UserRestServletV2(RestServlet):
admin = body.get("admin", None)
user_type = body.get("user_type", None)
displayname = body.get("displayname", None)
+ threepids = body.get("threepids", None)
if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
raise SynapseError(400, "Invalid user type")
@@ -232,6 +259,18 @@ class UserRestServletV2(RestServlet):
default_display_name=displayname,
user_type=user_type,
)
+
+ if "threepids" in body:
+ # check for required parameters for each threepid
+ for threepid in body["threepids"]:
+ assert_params_in_dict(threepid, ["medium", "address"])
+
+ current_time = self.hs.get_clock().time_msec()
+ for threepid in body["threepids"]:
+ await self.auth_handler.add_threepid(
+ user_id, threepid["medium"], threepid["address"], current_time
+ )
+
if "avatar_url" in body:
await self.profile_handler.set_avatar_url(
user_id, requester, body["avatar_url"], True
@@ -568,7 +607,7 @@ class UserAdminServlet(RestServlet):
{}
"""
- PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>@[^/]*)/admin$"),)
+ PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P<user_id>[^/]*)/admin$"),)
def __init__(self, hs):
self.hs = hs
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index e7fe50ed72..165313b572 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -14,6 +14,7 @@
# limitations under the License.
""" This module contains REST servlets to do with profile: /profile/<paths> """
+from twisted.internet import defer
from synapse.api.errors import Codes, SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -28,6 +29,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
super(ProfileDisplaynameRestServlet, self).__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
+ self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
async def on_GET(self, request, user_id):
@@ -63,11 +65,27 @@ class ProfileDisplaynameRestServlet(RestServlet):
await self.profile_handler.set_displayname(user, requester, new_name, is_admin)
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs"))
+ self.shadow_displayname(shadow_user.to_string(), content)
+
return 200, {}
def on_OPTIONS(self, request, user_id):
return 200, {}
+ @defer.inlineCallbacks
+ def shadow_displayname(self, user_id, body):
+ # TODO: retries
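+ # Mirror the change to the shadow homeserver, authenticating with the
+ # AS token and masquerading as the user via the user_id query param.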
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.put_json(
+ "%s/_matrix/client/r0/profile/%s/displayname?access_token=%s&user_id=%s"
+ % (shadow_hs_url, user_id, as_token, user_id),
+ body,
+ )
+
class ProfileAvatarURLRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)
@@ -76,6 +94,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
super(ProfileAvatarURLRestServlet, self).__init__()
self.hs = hs
self.profile_handler = hs.get_profile_handler()
+ self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
async def on_GET(self, request, user_id):
@@ -114,11 +133,27 @@ class ProfileAvatarURLRestServlet(RestServlet):
user, requester, new_avatar_url, is_admin
)
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(user.localpart, self.hs.config.shadow_server.get("hs"))
+ self.shadow_avatar_url(shadow_user.to_string(), content)
+
return 200, {}
def on_OPTIONS(self, request, user_id):
return 200, {}
+ @defer.inlineCallbacks
+ def shadow_avatar_url(self, user_id, body):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.put_json(
+ "%s/_matrix/client/r0/profile/%s/avatar_url?access_token=%s&user_id=%s"
+ % (shadow_hs_url, user_id, as_token, user_id),
+ body,
+ )
+
class ProfileRestServlet(RestServlet):
PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 6f31584c51..976ec6a4f1 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -722,7 +722,8 @@ class RoomMembershipRestServlet(TransactionRestServlet):
content["id_server"],
requester,
txn_id,
- content.get("id_access_token"),
+ new_room=False,
+ id_access_token=content.get("id_access_token"),
)
return 200, {}
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index dc837d6c75..bd1c0efbcb 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
+# Copyright 2018, 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,9 +15,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
+import re
from six.moves import http_client
+from twisted.internet import defer
+
from synapse.api.constants import LoginType
from synapse.api.errors import Codes, SynapseError, ThreepidValidationError
from synapse.config.emailconfig import ThreepidBehaviour
@@ -28,9 +31,10 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
)
from synapse.push.mailer import Mailer, load_jinja2_templates
+from synapse.types import UserID
from synapse.util.msisdn import phone_number_to_msisdn
-from synapse.util.stringutils import assert_valid_client_secret
+from synapse.util.stringutils import assert_valid_client_secret, random_string
from synapse.util.threepids import check_3pid_allowed
from ._base import client_patterns, interactive_auth_handler
@@ -91,7 +95,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
if not check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
- "Your email domain is not authorized on this server",
+ "Your email is not authorized on this server",
Codes.THREEPID_DENIED,
)
@@ -216,6 +220,7 @@ class PasswordRestServlet(RestServlet):
self.auth_handler = hs.get_auth_handler()
self.datastore = self.hs.get_datastore()
self._set_password_handler = hs.get_set_password_handler()
+ self.http_client = hs.get_simple_http_client()
@interactive_auth_handler
async def on_POST(self, request):
@@ -233,9 +238,13 @@ class PasswordRestServlet(RestServlet):
if self.auth.has_access_token(request):
requester = await self.auth.get_user_by_req(request)
- params = await self.auth_handler.validate_user_via_ui_auth(
- requester, body, self.hs.get_ip_from_request(request)
- )
+ # blindly trust ASes without UI-authing them
+ if requester.app_service:
+ params = body
+ else:
+ params = await self.auth_handler.validate_user_via_ui_auth(
+ requester, body, self.hs.get_ip_from_request(request)
+ )
user_id = requester.user.to_string()
else:
requester = None
@@ -268,11 +277,29 @@ class PasswordRestServlet(RestServlet):
await self._set_password_handler.set_password(user_id, new_password, requester)
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ self.shadow_password(params, shadow_user.to_string())
+
return 200, {}
def on_OPTIONS(self, _):
return 200, {}
+ @defer.inlineCallbacks
+ def shadow_password(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/password?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
class DeactivateAccountRestServlet(RestServlet):
PATTERNS = client_patterns("/account/deactivate$")
@@ -363,13 +390,15 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
- if not check_3pid_allowed(self.hs, "email", email):
+ if not (await check_3pid_allowed(self.hs, "email", email)):
raise SynapseError(
403,
- "Your email domain is not authorized on this server",
+ "Your email is not authorized on this server",
Codes.THREEPID_DENIED,
)
+ assert_valid_client_secret(body["client_secret"])
+
existing_user_id = await self.store.get_user_id_by_threepid(
"email", body["email"]
)
@@ -428,13 +457,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
msisdn = phone_number_to_msisdn(country, phone_number)
- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ if not (await check_3pid_allowed(self.hs, "msisdn", msisdn)):
raise SynapseError(
403,
"Account phone numbers are not authorized on this server",
Codes.THREEPID_DENIED,
)
+ assert_valid_client_secret(body["client_secret"])
+
existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn)
if existing_user_id is not None:
@@ -589,7 +620,8 @@ class ThreepidRestServlet(RestServlet):
self.identity_handler = hs.get_handlers().identity_handler
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
- self.datastore = self.hs.get_datastore()
+ self.datastore = hs.get_datastore()
+ self.http_client = hs.get_simple_http_client()
async def on_GET(self, request):
requester = await self.auth.get_user_by_req(request)
@@ -599,10 +631,33 @@ class ThreepidRestServlet(RestServlet):
return 200, {"threepids": threepids}
async def on_POST(self, request):
+ if self.hs.config.disable_3pid_changes:
+ raise SynapseError(400, "3PID changes disabled on this server")
+
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
+ # skip validation if this is a shadow 3PID from an AS
+ if requester.app_service:
+ # XXX: ASes pass in a validated threepid directly to bypass the IS.
+ # This makes the API entirely change shape when we have an AS token;
+ # it really should be an entirely separate API - perhaps
+ # /account/3pid/replicate or something.
+ threepid = body.get("threepid")
+
+ await self.auth_handler.add_threepid(
+ user_id, threepid["medium"], threepid["address"], threepid["validated_at"]
+ )
+
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ self.shadow_3pid({"threepid": threepid}, shadow_user.to_string())
+
+ return 200, {}
+
threepid_creds = body.get("threePidCreds") or body.get("three_pid_creds")
if threepid_creds is None:
raise SynapseError(
@@ -624,12 +679,36 @@ class ThreepidRestServlet(RestServlet):
validation_session["address"],
validation_session["validated_at"],
)
+
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ threepid = {
+ "medium": validation_session["medium"],
+ "address": validation_session["address"],
+ "validated_at": validation_session["validated_at"],
+ }
+ self.shadow_3pid({"threepid": threepid}, shadow_user.to_string())
+
return 200, {}
raise SynapseError(
400, "No validated 3pid session found", Codes.THREEPID_AUTH_FAILED
)
+ @defer.inlineCallbacks
+ def shadow_3pid(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
class ThreepidAddRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/add$", releases=(), unstable=True)
@@ -666,6 +745,16 @@ class ThreepidAddRestServlet(RestServlet):
validation_session["address"],
validation_session["validated_at"],
)
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ threepid = {
+ "medium": validation_session["medium"],
+ "address": validation_session["address"],
+ "validated_at": validation_session["validated_at"],
+ }
+ self.shadow_3pid({"threepid": threepid}, shadow_user.to_string())
return 200, {}
raise SynapseError(
@@ -701,6 +790,29 @@ class ThreepidBindRestServlet(RestServlet):
return 200, {}
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ threepid = {
+ "medium": validation_session["medium"],
+ "address": validation_session["address"],
+ "validated_at": validation_session["validated_at"],
+ }
+ self.shadow_3pid({"threepid": threepid}, shadow_user.to_string())
+
+ @defer.inlineCallbacks
+ def shadow_3pid(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/3pid?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
class ThreepidUnbindRestServlet(RestServlet):
PATTERNS = client_patterns("/account/3pid/unbind$", releases=(), unstable=True)
@@ -738,10 +850,15 @@ class ThreepidDeleteRestServlet(RestServlet):
def __init__(self, hs):
super(ThreepidDeleteRestServlet, self).__init__()
+ self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
+ self.http_client = hs.get_simple_http_client()
async def on_POST(self, request):
+ if self.hs.config.disable_3pid_changes:
+ raise SynapseError(400, "3PID changes disabled on this server")
+
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
@@ -759,6 +876,12 @@ class ThreepidDeleteRestServlet(RestServlet):
logger.exception("Failed to remove threepid")
raise SynapseError(500, "Failed to remove threepid")
+ if self.hs.config.shadow_server:
+ shadow_user = UserID(
+ requester.user.localpart, self.hs.config.shadow_server.get("hs")
+ )
+ self.shadow_3pid_delete(body, shadow_user.to_string())
+
if ret:
id_server_unbind_result = "success"
else:
@@ -766,6 +889,77 @@ class ThreepidDeleteRestServlet(RestServlet):
return 200, {"id_server_unbind_result": id_server_unbind_result}
+ @defer.inlineCallbacks
+ def shadow_3pid_delete(self, body, user_id):
+ # TODO: retries
+ shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
+ as_token = self.hs.config.shadow_server.get("as_token")
+
+ yield self.http_client.post_json_get_json(
+ "%s/_matrix/client/r0/account/3pid/delete?access_token=%s&user_id=%s"
+ % (shadow_hs_url, as_token, user_id),
+ body,
+ )
+
+
+class ThreepidLookupRestServlet(RestServlet):
+ PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/lookup$")]
+
+ def __init__(self, hs):
+ super(ThreepidLookupRestServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+ """Proxy a /_matrix/identity/api/v1/lookup request to an identity
+ server
+ """
+ yield self.auth.get_user_by_req(request)
+
+ # Verify query parameters
+ query_params = request.args
+ assert_params_in_dict(query_params, [b"medium", b"address", b"id_server"])
+
+ # Retrieve needed information from query parameters
+ medium = parse_string(request, "medium")
+ address = parse_string(request, "address")
+ id_server = parse_string(request, "id_server")
+
+ # Proxy the request to the identity server. lookup_3pid handles checking
+ # if the lookup is allowed so we don't need to do it here.
+ ret = yield self.identity_handler.proxy_lookup_3pid(id_server, medium, address)
+
+ defer.returnValue((200, ret))
+
+
+class ThreepidBulkLookupRestServlet(RestServlet):
+ PATTERNS = [re.compile("^/_matrix/client/unstable/account/3pid/bulk_lookup$")]
+
+ def __init__(self, hs):
+ super(ThreepidBulkLookupRestServlet, self).__init__()
+ self.auth = hs.get_auth()
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ """Proxy a /_matrix/identity/api/v1/bulk_lookup request to an identity
+ server
+ """
+ yield self.auth.get_user_by_req(request)
+
+ body = parse_json_object_from_request(request)
+
+ assert_params_in_dict(body, ["threepids", "id_server"])
+
+ # Proxy the request to the identity server. lookup_3pid handles checking
+ # if the lookup is allowed so we don't need to do it here.
+ ret = yield self.identity_handler.proxy_bulk_lookup_3pid(
+ body["id_server"], body["threepids"]
+ )
+
+ defer.returnValue((200, ret))
+
class WhoamiRestServlet(RestServlet):
PATTERNS = client_patterns("/account/whoami$")
@@ -794,4 +988,6 @@ def register_servlets(hs, http_server):
ThreepidBindRestServlet(hs).register(http_server)
ThreepidUnbindRestServlet(hs).register(http_server)
ThreepidDeleteRestServlet(hs).register(http_server)
+ ThreepidLookupRestServlet(hs).register(http_server)
+ ThreepidBulkLookupRestServlet(hs).register(http_server)
WhoamiRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py
index 64eb7fec3b..17495f020b 100644
--- a/synapse/rest/client/v2_alpha/account_data.py
+++ b/synapse/rest/client/v2_alpha/account_data.py
@@ -15,8 +15,11 @@
import logging
+from twisted.internet import defer
+
from synapse.api.errors import AuthError, NotFoundError, SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.types import UserID
from ._base import client_patterns
@@ -38,6 +41,7 @@ class AccountDataServlet(RestServlet):
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.notifier = hs.get_notifier()
+ self._profile_handler = hs.get_profile_handler()
async def on_PUT(self, request, user_id, account_data_type):
requester = await self.auth.get_user_by_req(request)
@@ -46,6 +50,11 @@ class AccountDataServlet(RestServlet):
body = parse_json_object_from_request(request)
+ if account_data_type == "im.vector.hide_profile":
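+ # Map the hide_profile flag onto the user's "active" state in the
+ # profile handler (hide_profile=True means inactive).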
+ user = UserID.from_string(user_id)
+ hide_profile = body.get("hide_profile")
+ await self._profile_handler.set_active(user, not hide_profile, True)
+
max_id = await self.store.add_account_data_for_user(
user_id, account_data_type, body
)
diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py
new file mode 100644
index 0000000000..968403cca4
--- /dev/null
+++ b/synapse/rest/client/v2_alpha/password_policy.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.http.servlet import RestServlet
+
+from ._base import client_patterns
+
+logger = logging.getLogger(__name__)
+
+
+class PasswordPolicyServlet(RestServlet):
+ PATTERNS = client_patterns("/password_policy$")
+
+ def __init__(self, hs):
+ """
+ Args:
+ hs (synapse.server.HomeServer): server
+ """
+ super(PasswordPolicyServlet, self).__init__()
+
+ self.policy = hs.config.password_policy
+ self.enabled = hs.config.password_policy_enabled
+
+ def on_GET(self, request):
+ if not self.enabled or not self.policy:
+ return (200, {})
+
+ policy = {}
+
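+ # Advertise each configured setting under an "m."-prefixed field name.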
+ for param in [
+ "minimum_length",
+ "require_digit",
+ "require_symbol",
+ "require_lowercase",
+ "require_uppercase",
+ ]:
+ if param in self.policy:
+ policy["m.%s" % param] = self.policy[param]
+
+ return (200, policy)
+
+
+def register_servlets(hs, http_server):
+ PasswordPolicyServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index a09189b1b4..7406c13fb4 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 - 2016 OpenMarket Ltd
-# Copyright 2017 Vector Creations Ltd
+# Copyright 2015-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,6 +17,7 @@
import hmac
import logging
+import re
from typing import List, Union
from six import string_types
@@ -123,10 +125,10 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param
- if not check_3pid_allowed(self.hs, "email", email):
+ if not (await check_3pid_allowed(self.hs, "email", body["email"])):
raise SynapseError(
403,
- "Your email domain is not authorized to register on this server",
+ "Your email is not authorized to register on this server",
Codes.THREEPID_DENIED,
)
@@ -190,7 +192,9 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
msisdn = phone_number_to_msisdn(country, phone_number)
- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ assert_valid_client_secret(body["client_secret"])
+
+ if not (await check_3pid_allowed(self.hs, "msisdn", msisdn)):
raise SynapseError(
403,
"Phone numbers are not authorized to register on this server",
@@ -373,6 +377,7 @@ class RegisterRestServlet(RestServlet):
self.room_member_handler = hs.get_room_member_handler()
self.macaroon_gen = hs.get_macaroon_generator()
self.ratelimiter = hs.get_registration_ratelimiter()
+ self.password_policy_handler = hs.get_password_policy_handler()
self.clock = hs.get_clock()
self._registration_flows = _calculate_registration_flows(
@@ -414,12 +419,15 @@ class RegisterRestServlet(RestServlet):
# we do basic sanity checks here because the auth layer will store these
# in sessions. Pull out the username/password provided to us.
+ desired_password = None
if "password" in body:
if (
not isinstance(body["password"], string_types)
or len(body["password"]) > 512
):
raise SynapseError(400, "Invalid password")
+ self.password_policy_handler.validate_password(body["password"])
+ desired_password = body["password"]
desired_username = None
if "username" in body:
@@ -430,6 +438,8 @@ class RegisterRestServlet(RestServlet):
raise SynapseError(400, "Invalid username")
desired_username = body["username"]
+ desired_display_name = body.get("display_name")
+
appservice = None
if self.auth.has_access_token(request):
appservice = await self.auth.get_appservice_by_req(request)
@@ -453,7 +463,11 @@ class RegisterRestServlet(RestServlet):
if isinstance(desired_username, string_types):
result = await self._do_appservice_registration(
- desired_username, access_token, body
+ desired_username,
+ desired_password,
+ desired_display_name,
+ access_token,
+ body,
)
return 200, result # we throw for non 200 responses
@@ -514,7 +528,7 @@ class RegisterRestServlet(RestServlet):
medium = auth_result[login_type]["medium"]
address = auth_result[login_type]["address"]
- if not check_3pid_allowed(self.hs, medium, address):
+ if not (await check_3pid_allowed(self.hs, medium, address)):
raise SynapseError(
403,
"Third party identifiers (email/phone numbers)"
@@ -522,6 +536,80 @@ class RegisterRestServlet(RestServlet):
Codes.THREEPID_DENIED,
)
+ existing_user_id = await self.store.get_user_id_by_threepid(
+ medium, address
+ )
+
+ if existing_user_id is not None:
+ raise SynapseError(
+ 400, "%s is already in use" % medium, Codes.THREEPID_IN_USE
+ )
+
+ if self.hs.config.register_mxid_from_3pid:
+ # override the desired_username based on the 3PID if any.
+ # reset it first to avoid folks picking their own username.
+ desired_username = None
+
+ # we should have an auth_result at this point if we're going to progress
+ # to register the user (i.e. we haven't picked up a registered_user_id
+ # from our session store), in which case get ready and gen the
+ # desired_username
+ if auth_result:
+ if (
+ self.hs.config.register_mxid_from_3pid == "email"
+ and LoginType.EMAIL_IDENTITY in auth_result
+ ):
+ address = auth_result[LoginType.EMAIL_IDENTITY]["address"]
+ desired_username = synapse.types.strip_invalid_mxid_characters(
+ address.replace("@", "-").lower()
+ )
+
+ # find a unique mxid for the account, suffixing numbers
+ # if needed
+ while True:
+ try:
+ await self.registration_handler.check_username(
+ desired_username,
+ guest_access_token=guest_access_token,
+ assigned_user_id=registered_user_id,
+ )
+ # if we got this far we passed the check.
+ break
+ except SynapseError as e:
+ if e.errcode == Codes.USER_IN_USE:
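+ # Increment a trailing number on the candidate localpart,
+ # or append "1" if there isn't one yet.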
+ m = re.match(r"^(.*?)(\d+)$", desired_username)
+ if m:
+ desired_username = m.group(1) + str(
+ int(m.group(2)) + 1
+ )
+ else:
+ desired_username += "1"
+ else:
+ # something else went wrong.
+ break
+
+ if self.hs.config.register_just_use_email_for_display_name:
+ desired_display_name = address
+ else:
+ # Custom mapping between email address and display name
+ desired_display_name = self._map_email_to_displayname(address)
+ elif (
+ self.hs.config.register_mxid_from_3pid == "msisdn"
+ and LoginType.MSISDN in auth_result
+ ):
+ desired_username = auth_result[LoginType.MSISDN]["address"]
+ else:
+ raise SynapseError(
+ 400, "Cannot derive mxid from 3pid; no recognised 3pid"
+ )
+
+ if desired_username is not None:
+ await self.registration_handler.check_username(
+ desired_username,
+ guest_access_token=guest_access_token,
+ assigned_user_id=registered_user_id,
+ )
+
if registered_user_id is not None:
logger.info(
"Already registered user ID %r for this session", registered_user_id
@@ -532,9 +620,16 @@ class RegisterRestServlet(RestServlet):
# NB: This may be from the auth handler and NOT from the POST
assert_params_in_dict(params, ["password"])
- desired_username = params.get("username", None)
+ if not self.hs.config.register_mxid_from_3pid:
+ desired_username = params.get("username", None)
+ else:
+ # we keep the original desired_username derived from the 3pid above
+ pass
+
guest_access_token = params.get("guest_access_token", None)
- new_password = params.get("password", None)
+
+ # XXX: don't we need to validate these for length etc like we did on
+ # the ones from the JSON body earlier on in the method?
if desired_username is not None:
desired_username = desired_username.lower()
@@ -567,8 +662,9 @@ class RegisterRestServlet(RestServlet):
registered_user_id = await self.registration_handler.register_user(
localpart=desired_username,
- password=new_password,
+ password=params.get("password", None),
guest_access_token=guest_access_token,
+ default_display_name=desired_display_name,
threepid=threepid,
address=client_addr,
)
@@ -580,6 +676,14 @@ class RegisterRestServlet(RestServlet):
):
await self.store.upsert_monthly_active_user(registered_user_id)
+ if self.hs.config.shadow_server:
+ await self.registration_handler.shadow_register(
+ localpart=desired_username,
+ display_name=desired_display_name,
+ auth_result=auth_result,
+ params=params,
+ )
+
# remember that we've now registered that user account, and with
# what user ID (since the user may not have specified)
self.auth_handler.set_session_data(
@@ -604,11 +708,30 @@ class RegisterRestServlet(RestServlet):
def on_OPTIONS(self, _):
return 200, {}
- async def _do_appservice_registration(self, username, as_token, body):
+ async def _do_appservice_registration(
+ self, username, password, display_name, as_token, body
+ ):
+ # FIXME: appservice_register() is horribly duplicated with register()
+ # and they should probably just be combined together with a config flag.
user_id = await self.registration_handler.appservice_register(
- username, as_token
+ username, as_token, password, display_name
)
- return await self._create_registration_details(user_id, body)
+ result = await self._create_registration_details(user_id, body)
+
+ auth_result = body.get("auth_result")
+ if auth_result and LoginType.EMAIL_IDENTITY in auth_result:
+ threepid = auth_result[LoginType.EMAIL_IDENTITY]
+ await self._register_email_threepid(
+ user_id, threepid, result["access_token"], body.get("bind_email")
+ )
+
+ if auth_result and LoginType.MSISDN in auth_result:
+ threepid = auth_result[LoginType.MSISDN]
+ await self._register_msisdn_threepid(
+ user_id, threepid, result["access_token"], body.get("bind_msisdn")
+ )
+
+ return result
async def _create_registration_details(self, user_id, params):
"""Complete registration of newly-registered user
@@ -659,6 +782,60 @@ class RegisterRestServlet(RestServlet):
)
+def cap(name):
+ """Capitalise parts of a name containing different words, including those
+ separated by hyphens.
+ For example, 'John-Doe'
+
+ Args:
+ name (str): The name to parse
+ """
+ if not name:
+ return name
+
+ # Split the name by whitespace then hyphens, capitalizing each part then
+ # joining it back together.
+ capitalized_name = " ".join(
+ "-".join(part.capitalize() for part in space_part.split("-"))
+ for space_part in name.split()
+ )
+ return capitalized_name
+
+
+def _map_email_to_displayname(address):
+ """Custom mapping from an email address to a user displayname
+
+ Args:
+ address (str): The email address to process
+ Returns:
+ str: The new displayname
+ """
+ # Split the part before and after the @ in the email.
+ # Replace all . with spaces in the first part
+ parts = address.replace(".", " ").split("@")
+
+ # Figure out which org this email address belongs to
+ org_parts = parts[1].split(" ")
+
+ # If this is a ...matrix.org email, mark them as an Admin
+ if org_parts[-2] == "matrix" and org_parts[-1] == "org":
+ org = "Tchap Admin"
+
+ # If this is a ...gouv.fr address, set the org to whatever is before
+ # gouv.fr. If there isn't anything (a @gouv.fr email) simply mark their
+ # org as "gouv"
+ elif org_parts[-2] == "gouv" and org_parts[-1] == "fr":
+ org = org_parts[-3] if len(org_parts) > 2 else org_parts[-2]
+
+ # Otherwise, mark their org as the email's second-level domain name
+ else:
+ org = org_parts[-2]
+
+ desired_display_name = cap(parts[0]) + " [" + cap(org) + "]"
+
+ return desired_display_name
+
+
def _calculate_registration_flows(
# technically `config` has to provide *all* of these interfaces, not just one
config: Union[RegistrationConfig, ConsentConfig, CaptchaConfig],
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index bef91a2d3e..faf9dbdea4 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -15,8 +15,13 @@
import logging
+from signedjson.sign import sign_json
+
+from twisted.internet import defer
+
from synapse.api.errors import SynapseError
from synapse.http.servlet import RestServlet, parse_json_object_from_request
+from synapse.types import UserID
from ._base import client_patterns
@@ -35,6 +40,7 @@ class UserDirectorySearchRestServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.user_directory_handler = hs.get_user_directory_handler()
+ self.http_client = hs.get_simple_http_client()
async def on_POST(self, request):
"""Searches for users in directory
@@ -61,6 +67,16 @@ class UserDirectorySearchRestServlet(RestServlet):
body = parse_json_object_from_request(request)
+ if self.hs.config.user_directory_defer_to_id_server:
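+ # Sign the request body so the identity server can verify which
+ # homeserver sent it.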
+ signed_body = sign_json(
+ body, self.hs.hostname, self.hs.config.signing_key[0]
+ )
+ url = "%s/_matrix/identity/api/v1/user_directory/search" % (
+ self.hs.config.user_directory_defer_to_id_server,
+ )
+ resp = await self.http_client.post_json_get_json(url, signed_body)
+ return 200, resp
+
limit = body.get("limit", 10)
limit = min(limit, 50)
@@ -76,5 +92,87 @@ class UserDirectorySearchRestServlet(RestServlet):
return 200, results
+class UserInfoServlet(RestServlet):
+ """
+ GET /user/{user_id}/info HTTP/1.1
+ """
+
+ PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/info$")
+
+ def __init__(self, hs):
+ super(UserInfoServlet, self).__init__()
+ self.hs = hs
+ self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
+ self.notifier = hs.get_notifier()
+ self.clock = hs.get_clock()
+ self.transport_layer = hs.get_federation_transport_client()
+ registry = hs.get_federation_registry()
+
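+ # Only register the federation query handler once, even if several
+ # instances of this servlet are built.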
+ if not registry.query_handlers.get("user_info"):
+ registry.register_query_handler("user_info", self._on_federation_query)
+
+ @defer.inlineCallbacks
+ def on_GET(self, request, user_id):
+ # Ensure the user is authenticated
+ yield self.auth.get_user_by_req(request, allow_guest=False)
+
+ user = UserID.from_string(user_id)
+ if not self.hs.is_mine(user):
+ # Attempt to make a federation request to the server that owns this user
+ args = {"user_id": user_id}
+ res = yield self.transport_layer.make_query(
+ user.domain, "user_info", args, retry_on_dns_fail=True
+ )
+ defer.returnValue((200, res))
+
+ res = yield self._get_user_info(user_id)
+ defer.returnValue((200, res))
+
+ @defer.inlineCallbacks
+ def _on_federation_query(self, args):
+ """Called when a request for user information appears over federation
+
+ Args:
+ args (dict): Dictionary of query arguments provided by the request
+
+ Returns:
+ Deferred[dict]: Deactivation and expiration information for a given user
+ """
+ user_id = args.get("user_id")
+ if not user_id:
+ raise SynapseError(400, "user_id not provided")
+
+ user = UserID.from_string(user_id)
+ if not self.hs.is_mine(user):
+ raise SynapseError(400, "User is not hosted on this homeserver")
+
+ res = yield self._get_user_info(user_id)
+ defer.returnValue(res)
+
+ @defer.inlineCallbacks
+ def _get_user_info(self, user_id):
+ """Retrieve information about a given user
+
+ Args:
+ user_id (str): The User ID of a given user on this homeserver
+
+ Returns:
+ Deferred[dict]: Deactivation and expiration information for a given user
+ """
+ # Check whether user is deactivated
+ is_deactivated = yield self.store.get_user_deactivated_status(user_id)
+
+ # Check whether user is expired
+ expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
+ is_expired = (
+ expiration_ts is not None and self.clock.time_msec() >= expiration_ts
+ )
+
+ res = {"expired": is_expired, "deactivated": is_deactivated}
+ defer.returnValue(res)
+
+
def register_servlets(hs, http_server):
UserDirectorySearchRestServlet(hs).register(http_server)
+ UserInfoServlet(hs).register(http_server)
diff --git a/synapse/rulecheck/__init__.py b/synapse/rulecheck/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/synapse/rulecheck/__init__.py
diff --git a/synapse/rulecheck/domain_rule_checker.py b/synapse/rulecheck/domain_rule_checker.py
new file mode 100644
index 0000000000..6f2a1931c5
--- /dev/null
+++ b/synapse/rulecheck/domain_rule_checker.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from synapse.config._base import ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+class DomainRuleChecker(object):
+ """
+ A re-implementation of the SpamChecker that prevents users in one domain from
+ inviting users in other domains to rooms, based on a configuration.
+
+ Takes a config in the format:
+
+ spam_checker:
+ module: "rulecheck.DomainRuleChecker"
+ config:
+ domain_mapping:
+ "inviter_domain": [ "invitee_domain_permitted", "other_domain_permitted" ]
+ "other_inviter_domain": [ "invitee_domain_permitted" ]
+ default: False
+
+ # Only let local users join rooms if they were explicitly invited.
+ can_only_join_rooms_with_invite: false
+
+ # Only let local users create rooms if they are inviting only one
+ # other user, and that user matches the rules above.
+ can_only_create_one_to_one_rooms: false
+
+ # Only let local users invite during room creation, regardless of the
+ # domain mapping rules above.
+ can_only_invite_during_room_creation: false
+
+ # Prevent local users from inviting users from certain domains to
+ # rooms published in the room directory.
+ domains_prevented_from_being_invited_to_published_rooms: []
+
+ # Allow third party invites
+ can_invite_by_third_party_id: true
+
+ Don't forget to consider whether users should be able to invite others
+ from their own domain.
+ """
+
+ def __init__(self, config):
+ self.domain_mapping = config["domain_mapping"] or {}
+ self.default = config["default"]
+
+ self.can_only_join_rooms_with_invite = config.get(
+ "can_only_join_rooms_with_invite", False
+ )
+ self.can_only_create_one_to_one_rooms = config.get(
+ "can_only_create_one_to_one_rooms", False
+ )
+ self.can_only_invite_during_room_creation = config.get(
+ "can_only_invite_during_room_creation", False
+ )
+ self.can_invite_by_third_party_id = config.get(
+ "can_invite_by_third_party_id", True
+ )
+ self.domains_prevented_from_being_invited_to_published_rooms = config.get(
+ "domains_prevented_from_being_invited_to_published_rooms", []
+ )
+
+ def check_event_for_spam(self, event):
+ """Implements synapse.events.SpamChecker.check_event_for_spam
+ """
+ return False
+
+ def user_may_invite(
+ self,
+ inviter_userid,
+ invitee_userid,
+ third_party_invite,
+ room_id,
+ new_room,
+ published_room=False,
+ ):
+ """Implements synapse.events.SpamChecker.user_may_invite
+ """
+ if self.can_only_invite_during_room_creation and not new_room:
+ return False
+
+ if not self.can_invite_by_third_party_id and third_party_invite:
+ return False
+
+ # This is a third party invite (without a bound mxid), so unless we have
+ # banned all third party invites (above) we allow it.
+ if not invitee_userid:
+ return True
+
+ inviter_domain = self._get_domain_from_id(inviter_userid)
+ invitee_domain = self._get_domain_from_id(invitee_userid)
+
+ if inviter_domain not in self.domain_mapping:
+ return self.default
+
+ if (
+ published_room
+ and invitee_domain
+ in self.domains_prevented_from_being_invited_to_published_rooms
+ ):
+ return False
+
+ return invitee_domain in self.domain_mapping[inviter_domain]
+
+ def user_may_create_room(
+ self, userid, invite_list, third_party_invite_list, cloning
+ ):
+ """Implements synapse.events.SpamChecker.user_may_create_room
+ """
+
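+ # Cloned rooms bypass the invite-count restrictions below.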
+ if cloning:
+ return True
+
+ if not self.can_invite_by_third_party_id and third_party_invite_list:
+ return False
+
+ number_of_invites = len(invite_list) + len(third_party_invite_list)
+
+ if self.can_only_create_one_to_one_rooms and number_of_invites != 1:
+ return False
+
+ return True
+
+ def user_may_create_room_alias(self, userid, room_alias):
+ """Implements synapse.events.SpamChecker.user_may_create_room_alias
+ """
+ return True
+
+ def user_may_publish_room(self, userid, room_id):
+ """Implements synapse.events.SpamChecker.user_may_publish_room
+ """
+ return True
+
+ def user_may_join_room(self, userid, room_id, is_invited):
+ """Implements synapse.events.SpamChecker.user_may_join_room
+ """
+ if self.can_only_join_rooms_with_invite and not is_invited:
+ return False
+
+ return True
+
+ @staticmethod
+ def parse_config(config):
+ """Implements synapse.events.SpamChecker.parse_config
+ """
+ if "default" in config:
+ return config
+ else:
+ raise ConfigError("No default set for spam_config DomainRuleChecker")
+
+ @staticmethod
+ def _get_domain_from_id(mxid):
+ """Parses a string and returns the domain part of the mxid.
+
+ Args:
+ mxid (str): a valid mxid
+
+ Returns:
+ str: the domain part of the mxid
+
+ """
+ idx = mxid.find(":")
+ if idx == -1:
+ raise Exception("Invalid ID: %r" % (mxid,))
+ return mxid[idx + 1 :]
diff --git a/synapse/server.py b/synapse/server.py
index 7926867b77..884028ca77 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -50,7 +50,7 @@ from synapse.federation.send_queue import FederationRemoteSendQueue
from synapse.federation.sender import FederationSender
from synapse.federation.transport.client import TransportLayerClient
from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer
-from synapse.groups.groups_server import GroupsServerHandler
+from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler
from synapse.handlers import Handlers
from synapse.handlers.account_validity import AccountValidityHandler
from synapse.handlers.acme import AcmeHandler
@@ -62,10 +62,11 @@ from synapse.handlers.devicemessage import DeviceMessageHandler
from synapse.handlers.e2e_keys import E2eKeysHandler
from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler
from synapse.handlers.events import EventHandler, EventStreamHandler
-from synapse.handlers.groups_local import GroupsLocalHandler
+from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler
from synapse.handlers.initial_sync import InitialSyncHandler
from synapse.handlers.message import EventCreationHandler, MessageHandler
from synapse.handlers.pagination import PaginationHandler
+from synapse.handlers.password_policy import PasswordPolicyHandler
from synapse.handlers.presence import PresenceHandler
from synapse.handlers.profile import BaseProfileHandler, MasterProfileHandler
from synapse.handlers.read_marker import ReadMarkerHandler
@@ -167,6 +168,7 @@ class HomeServer(object):
"event_builder_factory",
"filtering",
"http_client_context_factory",
+ "proxied_http_client",
"simple_http_client",
"proxied_http_client",
"media_repository",
@@ -199,6 +201,7 @@ class HomeServer(object):
"saml_handler",
"event_client_serializer",
"storage",
+ "password_policy_handler",
]
REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
@@ -460,10 +463,16 @@ class HomeServer(object):
return UserDirectoryHandler(self)
def build_groups_local_handler(self):
- return GroupsLocalHandler(self)
+ if self.config.worker_app:
+ return GroupsLocalWorkerHandler(self)
+ else:
+ return GroupsLocalHandler(self)
def build_groups_server_handler(self):
- return GroupsServerHandler(self)
+ if self.config.worker_app:
+ return GroupsServerWorkerHandler(self)
+ else:
+ return GroupsServerHandler(self)
def build_groups_attestation_signing(self):
return GroupAttestationSigning(self)
@@ -530,6 +539,9 @@ class HomeServer(object):
def build_storage(self) -> Storage:
return Storage(self, self.datastores)
+ def build_password_policy_handler(self):
+ return PasswordPolicyHandler(self)
+
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
diff --git a/synapse/server.pyi b/synapse/server.pyi
index 90347ac23e..40eabfe5d9 100644
--- a/synapse/server.pyi
+++ b/synapse/server.pyi
@@ -107,3 +107,5 @@ class HomeServer(object):
self,
) -> synapse.replication.tcp.client.ReplicationClientHandler:
pass
+ def is_mine_id(self, domain_id: str) -> bool:
+ pass
diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/data_stores/main/appservice.py
index b2f39649fd..340de45e99 100644
--- a/synapse/storage/data_stores/main/appservice.py
+++ b/synapse/storage/data_stores/main/appservice.py
@@ -35,7 +35,7 @@ def _make_exclusive_regex(services_cache):
exclusive_user_regexes = [
regex.pattern
for service in services_cache
- for regex in service.get_exlusive_user_regexes()
+ for regex in service.get_exclusive_user_regexes()
]
if exclusive_user_regexes:
exclusive_user_regex = "|".join("(" + r + ")" for r in exclusive_user_regexes)
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py
index 6acd45e9f3..0963e6c250 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/data_stores/main/group_server.py
@@ -27,21 +27,7 @@ _DEFAULT_CATEGORY_ID = ""
_DEFAULT_ROLE_ID = ""
-class GroupServerStore(SQLBaseStore):
- def set_group_join_policy(self, group_id, join_policy):
- """Set the join policy of a group.
-
- join_policy can be one of:
- * "invite"
- * "open"
- """
- return self.db.simple_update_one(
- table="groups",
- keyvalues={"group_id": group_id},
- updatevalues={"join_policy": join_policy},
- desc="set_group_join_policy",
- )
-
+class GroupServerWorkerStore(SQLBaseStore):
def get_group(self, group_id):
return self.db.simple_select_one(
table="groups",
@@ -157,6 +143,366 @@ class GroupServerStore(SQLBaseStore):
"get_rooms_for_summary", _get_rooms_for_summary_txn
)
+ @defer.inlineCallbacks
+ def get_group_categories(self, group_id):
+ rows = yield self.db.simple_select_list(
+ table="group_room_categories",
+ keyvalues={"group_id": group_id},
+ retcols=("category_id", "is_public", "profile"),
+ desc="get_group_categories",
+ )
+
+ return {
+ row["category_id"]: {
+ "is_public": row["is_public"],
+ "profile": json.loads(row["profile"]),
+ }
+ for row in rows
+ }
+
+ @defer.inlineCallbacks
+ def get_group_category(self, group_id, category_id):
+ category = yield self.db.simple_select_one(
+ table="group_room_categories",
+ keyvalues={"group_id": group_id, "category_id": category_id},
+ retcols=("is_public", "profile"),
+ desc="get_group_category",
+ )
+
+ category["profile"] = json.loads(category["profile"])
+
+ return category
+
+ @defer.inlineCallbacks
+ def get_group_roles(self, group_id):
+ rows = yield self.db.simple_select_list(
+ table="group_roles",
+ keyvalues={"group_id": group_id},
+ retcols=("role_id", "is_public", "profile"),
+ desc="get_group_roles",
+ )
+
+ return {
+ row["role_id"]: {
+ "is_public": row["is_public"],
+ "profile": json.loads(row["profile"]),
+ }
+ for row in rows
+ }
+
+ @defer.inlineCallbacks
+ def get_group_role(self, group_id, role_id):
+ role = yield self.db.simple_select_one(
+ table="group_roles",
+ keyvalues={"group_id": group_id, "role_id": role_id},
+ retcols=("is_public", "profile"),
+ desc="get_group_role",
+ )
+
+ role["profile"] = json.loads(role["profile"])
+
+ return role
+
+ def get_local_groups_for_room(self, room_id):
+ """Get all of the local group that contain a given room
+ Args:
+ room_id (str): The ID of a room
+ Returns:
+ Deferred[list[str]]: A twisted.Deferred containing a list of group ids
+ containing this room
+ """
+ return self.db.simple_select_onecol(
+ table="group_rooms",
+ keyvalues={"room_id": room_id},
+ retcol="group_id",
+ desc="get_local_groups_for_room",
+ )
+
+ def get_users_for_summary_by_role(self, group_id, include_private=False):
+ """Get the users and roles that should be included in a summary request
+
+ Returns ([users], [roles])
+ """
+
+ def _get_users_for_summary_txn(txn):
+ keyvalues = {"group_id": group_id}
+ if not include_private:
+ keyvalues["is_public"] = True
+
+ sql = """
+ SELECT user_id, is_public, role_id, user_order
+ FROM group_summary_users
+ WHERE group_id = ?
+ """
+
+ if not include_private:
+ sql += " AND is_public = ?"
+ txn.execute(sql, (group_id, True))
+ else:
+ txn.execute(sql, (group_id,))
+
+ users = [
+ {
+ "user_id": row[0],
+ "is_public": row[1],
+ "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None,
+ "order": row[3],
+ }
+ for row in txn
+ ]
+
+ sql = """
+ SELECT role_id, is_public, profile, role_order
+ FROM group_summary_roles
+ INNER JOIN group_roles USING (group_id, role_id)
+ WHERE group_id = ?
+ """
+
+ if not include_private:
+ sql += " AND is_public = ?"
+ txn.execute(sql, (group_id, True))
+ else:
+ txn.execute(sql, (group_id,))
+
+ roles = {
+ row[0]: {
+ "is_public": row[1],
+ "profile": json.loads(row[2]),
+ "order": row[3],
+ }
+ for row in txn
+ }
+
+ return users, roles
+
+ return self.db.runInteraction(
+ "get_users_for_summary_by_role", _get_users_for_summary_txn
+ )
+
+ def is_user_in_group(self, user_id, group_id):
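+ # The select returns the user_id or None; the callback coerces that
+ # into a bool.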
+ return self.db.simple_select_one_onecol(
+ table="group_users",
+ keyvalues={"group_id": group_id, "user_id": user_id},
+ retcol="user_id",
+ allow_none=True,
+ desc="is_user_in_group",
+ ).addCallback(lambda r: bool(r))
+
+ def is_user_admin_in_group(self, group_id, user_id):
+ return self.db.simple_select_one_onecol(
+ table="group_users",
+ keyvalues={"group_id": group_id, "user_id": user_id},
+ retcol="is_admin",
+ allow_none=True,
+ desc="is_user_admin_in_group",
+ )
+
+ def is_user_invited_to_local_group(self, group_id, user_id):
+ """Has the group server invited a user?
+ """
+ return self.db.simple_select_one_onecol(
+ table="group_invites",
+ keyvalues={"group_id": group_id, "user_id": user_id},
+ retcol="user_id",
+ desc="is_user_invited_to_local_group",
+ allow_none=True,
+ )
+
+ def get_users_membership_info_in_group(self, group_id, user_id):
+ """Get a dict describing the membership of a user in a group.
+
+ Example if joined:
+
+ {
+ "membership": "join",
+ "is_public": True,
+ "is_privileged": False,
+ }
+
+ Returns an empty dict if the user is neither joined to nor invited to the group.
+ """
+
+ def _get_users_membership_in_group_txn(txn):
+ row = self.db.simple_select_one_txn(
+ txn,
+ table="group_users",
+ keyvalues={"group_id": group_id, "user_id": user_id},
+ retcols=("is_admin", "is_public"),
+ allow_none=True,
+ )
+
+ if row:
+ return {
+ "membership": "join",
+ "is_public": row["is_public"],
+ "is_privileged": row["is_admin"],
+ }
+
+ row = self.db.simple_select_one_onecol_txn(
+ txn,
+ table="group_invites",
+ keyvalues={"group_id": group_id, "user_id": user_id},
+ retcol="user_id",
+ allow_none=True,
+ )
+
+ if row:
+ return {"membership": "invite"}
+
+ return {}
+
+ return self.db.runInteraction(
+ "get_users_membership_info_in_group", _get_users_membership_in_group_txn
+ )
+
+ def get_publicised_groups_for_user(self, user_id):
+ """Get all groups a user is publicising
+ """
+ return self.db.simple_select_onecol(
+ table="local_group_membership",
+ keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True},
+ retcol="group_id",
+ desc="get_publicised_groups_for_user",
+ )
+
+ def get_attestations_need_renewals(self, valid_until_ms):
+ """Get all attestations that need to be renewed until givent time
+ """
+
+ def _get_attestations_need_renewals_txn(txn):
+ sql = """
+ SELECT group_id, user_id FROM group_attestations_renewals
+ WHERE valid_until_ms <= ?
+ """
+ txn.execute(sql, (valid_until_ms,))
+ return self.db.cursor_to_dict(txn)
+
+ return self.db.runInteraction(
+ "get_attestations_need_renewals", _get_attestations_need_renewals_txn
+ )
+
+ @defer.inlineCallbacks
+ def get_remote_attestation(self, group_id, user_id):
+ """Get the attestation that proves the remote agrees that the user is
+ in the group.
+ """
+ row = yield self.db.simple_select_one(
+ table="group_attestations_remote",
+ keyvalues={"group_id": group_id, "user_id": user_id},
+ retcols=("valid_until_ms", "attestation_json"),
+ desc="get_remote_attestation",
+ allow_none=True,
+ )
+
+ now = int(self._clock.time_msec())
+ if row and now < row["valid_until_ms"]:
+ return json.loads(row["attestation_json"])
+
+ return None
+
+ def get_joined_groups(self, user_id):
+ return self.db.simple_select_onecol(
+ table="local_group_membership",
+ keyvalues={"user_id": user_id, "membership": "join"},
+ retcol="group_id",
+ desc="get_joined_groups",
+ )
+
+ def get_all_groups_for_user(self, user_id, now_token):
+ def _get_all_groups_for_user_txn(txn):
+ sql = """
+ SELECT group_id, type, membership, u.content
+ FROM local_group_updates AS u
+ INNER JOIN local_group_membership USING (group_id, user_id)
+ WHERE user_id = ? AND membership != 'leave'
+ AND stream_id <= ?
+ """
+ txn.execute(sql, (user_id, now_token))
+ return [
+ {
+ "group_id": row[0],
+ "type": row[1],
+ "membership": row[2],
+ "content": json.loads(row[3]),
+ }
+ for row in txn
+ ]
+
+ return self.db.runInteraction(
+ "get_all_groups_for_user", _get_all_groups_for_user_txn
+ )
+
+ def get_groups_changes_for_user(self, user_id, from_token, to_token):
+ from_token = int(from_token)
+ has_changed = self._group_updates_stream_cache.has_entity_changed(
+ user_id, from_token
+ )
+ if not has_changed:
+ return defer.succeed([])
+
+ def _get_groups_changes_for_user_txn(txn):
+ sql = """
+ SELECT group_id, membership, type, u.content
+ FROM local_group_updates AS u
+ INNER JOIN local_group_membership USING (group_id, user_id)
+ WHERE user_id = ? AND ? < stream_id AND stream_id <= ?
+ """
+ txn.execute(sql, (user_id, from_token, to_token))
+ return [
+ {
+ "group_id": group_id,
+ "membership": membership,
+ "type": gtype,
+ "content": json.loads(content_json),
+ }
+ for group_id, membership, gtype, content_json in txn
+ ]
+
+ return self.db.runInteraction(
+ "get_groups_changes_for_user", _get_groups_changes_for_user_txn
+ )
+
+ def get_all_groups_changes(self, from_token, to_token, limit):
+ from_token = int(from_token)
+ has_changed = self._group_updates_stream_cache.has_any_entity_changed(
+ from_token
+ )
+ if not has_changed:
+ return defer.succeed([])
+
+ def _get_all_groups_changes_txn(txn):
+ sql = """
+ SELECT stream_id, group_id, user_id, type, content
+ FROM local_group_updates
+ WHERE ? < stream_id AND stream_id <= ?
+ LIMIT ?
+ """
+ txn.execute(sql, (from_token, to_token, limit))
+ return [
+ (stream_id, group_id, user_id, gtype, json.loads(content_json))
+ for stream_id, group_id, user_id, gtype, content_json in txn
+ ]
+
+ return self.db.runInteraction(
+ "get_all_groups_changes", _get_all_groups_changes_txn
+ )
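+
+ # Note (editor's): both change-fetching methods above select the half-open
+ # token interval (from_token, to_token], i.e. "? < stream_id AND
+ # stream_id <= ?", so a caller that passes the last stream ID it has seen as
+ # the next from_token will not receive duplicates across calls.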
+
+
+class GroupServerStore(GroupServerWorkerStore):
+ def set_group_join_policy(self, group_id, join_policy):
+ """Set the join policy of a group.
+
+ join_policy can be one of:
+ * "invite"
+ * "open"
+ """
+ return self.db.simple_update_one(
+ table="groups",
+ keyvalues={"group_id": group_id},
+ updatevalues={"join_policy": join_policy},
+ desc="set_group_join_policy",
+ )
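+
+ # Illustrative usage (editor's sketch, not part of this change; the group ID
+ # is hypothetical):
+ #
+ # yield store.set_group_join_policy("+staff:example.com", "open")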
+
def add_room_to_summary(self, group_id, room_id, category_id, order, is_public):
return self.db.runInteraction(
"add_room_to_summary",
@@ -299,36 +645,6 @@ class GroupServerStore(SQLBaseStore):
desc="remove_room_from_summary",
)
- @defer.inlineCallbacks
- def get_group_categories(self, group_id):
- rows = yield self.db.simple_select_list(
- table="group_room_categories",
- keyvalues={"group_id": group_id},
- retcols=("category_id", "is_public", "profile"),
- desc="get_group_categories",
- )
-
- return {
- row["category_id"]: {
- "is_public": row["is_public"],
- "profile": json.loads(row["profile"]),
- }
- for row in rows
- }
-
- @defer.inlineCallbacks
- def get_group_category(self, group_id, category_id):
- category = yield self.db.simple_select_one(
- table="group_room_categories",
- keyvalues={"group_id": group_id, "category_id": category_id},
- retcols=("is_public", "profile"),
- desc="get_group_category",
- )
-
- category["profile"] = json.loads(category["profile"])
-
- return category
-
def upsert_group_category(self, group_id, category_id, profile, is_public):
"""Add/update room category for group
"""
@@ -360,36 +676,6 @@ class GroupServerStore(SQLBaseStore):
desc="remove_group_category",
)
- @defer.inlineCallbacks
- def get_group_roles(self, group_id):
- rows = yield self.db.simple_select_list(
- table="group_roles",
- keyvalues={"group_id": group_id},
- retcols=("role_id", "is_public", "profile"),
- desc="get_group_roles",
- )
-
- return {
- row["role_id"]: {
- "is_public": row["is_public"],
- "profile": json.loads(row["profile"]),
- }
- for row in rows
- }
-
- @defer.inlineCallbacks
- def get_group_role(self, group_id, role_id):
- role = yield self.db.simple_select_one(
- table="group_roles",
- keyvalues={"group_id": group_id, "role_id": role_id},
- retcols=("is_public", "profile"),
- desc="get_group_role",
- )
-
- role["profile"] = json.loads(role["profile"])
-
- return role
-
def upsert_group_role(self, group_id, role_id, profile, is_public):
"""Add/remove user role
"""
@@ -555,100 +841,6 @@ class GroupServerStore(SQLBaseStore):
desc="remove_user_from_summary",
)
- def get_local_groups_for_room(self, room_id):
- """Get all of the local group that contain a given room
- Args:
- room_id (str): The ID of a room
- Returns:
- Deferred[list[str]]: A twisted.Deferred containing a list of group ids
- containing this room
- """
- return self.db.simple_select_onecol(
- table="group_rooms",
- keyvalues={"room_id": room_id},
- retcol="group_id",
- desc="get_local_groups_for_room",
- )
-
- def get_users_for_summary_by_role(self, group_id, include_private=False):
- """Get the users and roles that should be included in a summary request
-
- Returns ([users], [roles])
- """
-
- def _get_users_for_summary_txn(txn):
- keyvalues = {"group_id": group_id}
- if not include_private:
- keyvalues["is_public"] = True
-
- sql = """
- SELECT user_id, is_public, role_id, user_order
- FROM group_summary_users
- WHERE group_id = ?
- """
-
- if not include_private:
- sql += " AND is_public = ?"
- txn.execute(sql, (group_id, True))
- else:
- txn.execute(sql, (group_id,))
-
- users = [
- {
- "user_id": row[0],
- "is_public": row[1],
- "role_id": row[2] if row[2] != _DEFAULT_ROLE_ID else None,
- "order": row[3],
- }
- for row in txn
- ]
-
- sql = """
- SELECT role_id, is_public, profile, role_order
- FROM group_summary_roles
- INNER JOIN group_roles USING (group_id, role_id)
- WHERE group_id = ?
- """
-
- if not include_private:
- sql += " AND is_public = ?"
- txn.execute(sql, (group_id, True))
- else:
- txn.execute(sql, (group_id,))
-
- roles = {
- row[0]: {
- "is_public": row[1],
- "profile": json.loads(row[2]),
- "order": row[3],
- }
- for row in txn
- }
-
- return users, roles
-
- return self.db.runInteraction(
- "get_users_for_summary_by_role", _get_users_for_summary_txn
- )
-
- def is_user_in_group(self, user_id, group_id):
- return self.db.simple_select_one_onecol(
- table="group_users",
- keyvalues={"group_id": group_id, "user_id": user_id},
- retcol="user_id",
- allow_none=True,
- desc="is_user_in_group",
- ).addCallback(lambda r: bool(r))
-
- def is_user_admin_in_group(self, group_id, user_id):
- return self.db.simple_select_one_onecol(
- table="group_users",
- keyvalues={"group_id": group_id, "user_id": user_id},
- retcol="is_admin",
- allow_none=True,
- desc="is_user_admin_in_group",
- )
-
def add_group_invite(self, group_id, user_id):
"""Record that the group server has invited a user
"""
@@ -658,64 +850,6 @@ class GroupServerStore(SQLBaseStore):
desc="add_group_invite",
)
- def is_user_invited_to_local_group(self, group_id, user_id):
- """Has the group server invited a user?
- """
- return self.db.simple_select_one_onecol(
- table="group_invites",
- keyvalues={"group_id": group_id, "user_id": user_id},
- retcol="user_id",
- desc="is_user_invited_to_local_group",
- allow_none=True,
- )
-
- def get_users_membership_info_in_group(self, group_id, user_id):
- """Get a dict describing the membership of a user in a group.
-
- Example if joined:
-
- {
- "membership": "join",
- "is_public": True,
- "is_privileged": False,
- }
-
- Returns an empty dict if the user is not join/invite/etc
- """
-
- def _get_users_membership_in_group_txn(txn):
- row = self.db.simple_select_one_txn(
- txn,
- table="group_users",
- keyvalues={"group_id": group_id, "user_id": user_id},
- retcols=("is_admin", "is_public"),
- allow_none=True,
- )
-
- if row:
- return {
- "membership": "join",
- "is_public": row["is_public"],
- "is_privileged": row["is_admin"],
- }
-
- row = self.db.simple_select_one_onecol_txn(
- txn,
- table="group_invites",
- keyvalues={"group_id": group_id, "user_id": user_id},
- retcol="user_id",
- allow_none=True,
- )
-
- if row:
- return {"membership": "invite"}
-
- return {}
-
- return self.db.runInteraction(
- "get_users_membership_info_in_group", _get_users_membership_in_group_txn
- )
-
def add_user_to_group(
self,
group_id,
@@ -846,16 +980,6 @@ class GroupServerStore(SQLBaseStore):
"remove_room_from_group", _remove_room_from_group_txn
)
- def get_publicised_groups_for_user(self, user_id):
- """Get all groups a user is publicising
- """
- return self.db.simple_select_onecol(
- table="local_group_membership",
- keyvalues={"user_id": user_id, "membership": "join", "is_publicised": True},
- retcol="group_id",
- desc="get_publicised_groups_for_user",
- )
-
def update_group_publicity(self, group_id, user_id, publicise):
"""Update whether the user is publicising their membership of the group
"""
@@ -1000,22 +1124,6 @@ class GroupServerStore(SQLBaseStore):
desc="update_group_profile",
)
- def get_attestations_need_renewals(self, valid_until_ms):
- """Get all attestations that need to be renewed until givent time
- """
-
- def _get_attestations_need_renewals_txn(txn):
- sql = """
- SELECT group_id, user_id FROM group_attestations_renewals
- WHERE valid_until_ms <= ?
- """
- txn.execute(sql, (valid_until_ms,))
- return self.db.cursor_to_dict(txn)
-
- return self.db.runInteraction(
- "get_attestations_need_renewals", _get_attestations_need_renewals_txn
- )
-
def update_attestation_renewal(self, group_id, user_id, attestation):
"""Update an attestation that we have renewed
"""
@@ -1054,112 +1162,6 @@ class GroupServerStore(SQLBaseStore):
desc="remove_attestation_renewal",
)
- @defer.inlineCallbacks
- def get_remote_attestation(self, group_id, user_id):
- """Get the attestation that proves the remote agrees that the user is
- in the group.
- """
- row = yield self.db.simple_select_one(
- table="group_attestations_remote",
- keyvalues={"group_id": group_id, "user_id": user_id},
- retcols=("valid_until_ms", "attestation_json"),
- desc="get_remote_attestation",
- allow_none=True,
- )
-
- now = int(self._clock.time_msec())
- if row and now < row["valid_until_ms"]:
- return json.loads(row["attestation_json"])
-
- return None
-
- def get_joined_groups(self, user_id):
- return self.db.simple_select_onecol(
- table="local_group_membership",
- keyvalues={"user_id": user_id, "membership": "join"},
- retcol="group_id",
- desc="get_joined_groups",
- )
-
- def get_all_groups_for_user(self, user_id, now_token):
- def _get_all_groups_for_user_txn(txn):
- sql = """
- SELECT group_id, type, membership, u.content
- FROM local_group_updates AS u
- INNER JOIN local_group_membership USING (group_id, user_id)
- WHERE user_id = ? AND membership != 'leave'
- AND stream_id <= ?
- """
- txn.execute(sql, (user_id, now_token))
- return [
- {
- "group_id": row[0],
- "type": row[1],
- "membership": row[2],
- "content": json.loads(row[3]),
- }
- for row in txn
- ]
-
- return self.db.runInteraction(
- "get_all_groups_for_user", _get_all_groups_for_user_txn
- )
-
- def get_groups_changes_for_user(self, user_id, from_token, to_token):
- from_token = int(from_token)
- has_changed = self._group_updates_stream_cache.has_entity_changed(
- user_id, from_token
- )
- if not has_changed:
- return defer.succeed([])
-
- def _get_groups_changes_for_user_txn(txn):
- sql = """
- SELECT group_id, membership, type, u.content
- FROM local_group_updates AS u
- INNER JOIN local_group_membership USING (group_id, user_id)
- WHERE user_id = ? AND ? < stream_id AND stream_id <= ?
- """
- txn.execute(sql, (user_id, from_token, to_token))
- return [
- {
- "group_id": group_id,
- "membership": membership,
- "type": gtype,
- "content": json.loads(content_json),
- }
- for group_id, membership, gtype, content_json in txn
- ]
-
- return self.db.runInteraction(
- "get_groups_changes_for_user", _get_groups_changes_for_user_txn
- )
-
- def get_all_groups_changes(self, from_token, to_token, limit):
- from_token = int(from_token)
- has_changed = self._group_updates_stream_cache.has_any_entity_changed(
- from_token
- )
- if not has_changed:
- return defer.succeed([])
-
- def _get_all_groups_changes_txn(txn):
- sql = """
- SELECT stream_id, group_id, user_id, type, content
- FROM local_group_updates
- WHERE ? < stream_id AND stream_id <= ?
- LIMIT ?
- """
- txn.execute(sql, (from_token, to_token, limit))
- return [
- (stream_id, group_id, user_id, gtype, json.loads(content_json))
- for stream_id, group_id, user_id, gtype, content_json in txn
- ]
-
- return self.db.runInteraction(
- "get_all_groups_changes", _get_all_groups_changes_txn
- )
-
def get_group_stream_token(self):
return self._group_updates_id_gen.get_current_token()
diff --git a/synapse/storage/data_stores/main/profile.py b/synapse/storage/data_stores/main/profile.py
index 2b52cf9c1a..3dc4451447 100644
--- a/synapse/storage/data_stores/main/profile.py
+++ b/synapse/storage/data_stores/main/profile.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,9 +17,13 @@
from twisted.internet import defer
from synapse.api.errors import StoreError
+
+from synapse.storage import background_updates
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.main.roommember import ProfileInfo
+BATCH_SIZE = 100
+
class ProfileWorkerStore(SQLBaseStore):
@defer.inlineCallbacks
@@ -57,6 +62,54 @@ class ProfileWorkerStore(SQLBaseStore):
desc="get_profile_avatar_url",
)
+ def get_latest_profile_replication_batch_number(self):
+ def f(txn):
+ txn.execute("SELECT MAX(batch) as maxbatch FROM profiles")
+ rows = self.db.cursor_to_dict(txn)
+ return rows[0]["maxbatch"]
+
+ return self.db.runInteraction("get_latest_profile_replication_batch_number", f)
+
+ def get_profile_batch(self, batchnum):
+ return self.db.simple_select_list(
+ table="profiles",
+ keyvalues={"batch": batchnum},
+ retcols=("user_id", "displayname", "avatar_url", "active"),
+ desc="get_profile_batch",
+ )
+
+ def assign_profile_batch(self):
+ def f(txn):
+ sql = (
+ "UPDATE profiles SET batch = "
+ "(SELECT COALESCE(MAX(batch), -1) + 1 FROM profiles) "
+ "WHERE user_id in ("
+ " SELECT user_id FROM profiles WHERE batch is NULL limit ?"
+ ")"
+ )
+ txn.execute(sql, (BATCH_SIZE,))
+ return txn.rowcount
+
+ return self.db.runInteraction("assign_profile_batch", f)
+
+ def get_replication_hosts(self):
+ def f(txn):
+ txn.execute(
+ "SELECT host, last_synced_batch FROM profile_replication_status"
+ )
+ rows = self.db.cursor_to_dict(txn)
+ return {r["host"]: r["last_synced_batch"] for r in rows}
+
+ return self.db.runInteraction("get_replication_hosts", f)
+
+ def update_replication_batch_for_host(self, host, last_synced_batch):
+ return self.db.simple_upsert(
+ table="profile_replication_status",
+ keyvalues={"host": host},
+ values={"last_synced_batch": last_synced_batch},
+ desc="update_replication_batch_for_host",
+ )
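+
+ # Illustrative flow (editor's sketch, not part of this change): a replication
+ # pusher could combine the methods above roughly as follows, assuming a
+ # hypothetical `send_batch(host, rows)` helper:
+ #
+ # yield store.assign_profile_batch()
+ # latest = yield store.get_latest_profile_replication_batch_number()
+ # for host, last_synced in (yield store.get_replication_hosts()).items():
+ #     for batchnum in range(last_synced + 1, (latest or 0) + 1):
+ #         rows = yield store.get_profile_batch(batchnum)
+ #         yield send_batch(host, rows)
+ #         yield store.update_replication_batch_for_host(host, batchnum)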
+
def get_from_remote_profile_cache(self, user_id):
return self.db.simple_select_one(
table="remote_profile_cache",
@@ -71,24 +124,53 @@ class ProfileWorkerStore(SQLBaseStore):
table="profiles", values={"user_id": user_localpart}, desc="create_profile"
)
- def set_profile_displayname(self, user_localpart, new_displayname):
- return self.db.simple_update_one(
+ def set_profile_displayname(self, user_localpart, new_displayname, batchnum):
+ return self.db.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- updatevalues={"displayname": new_displayname},
+ values={"displayname": new_displayname, "batch": batchnum},
desc="set_profile_displayname",
+ lock=False, # we can do this because user_id has a unique index
)
- def set_profile_avatar_url(self, user_localpart, new_avatar_url):
- return self.db.simple_update_one(
+ def set_profile_avatar_url(self, user_localpart, new_avatar_url, batchnum):
+ return self.db.simple_upsert(
table="profiles",
keyvalues={"user_id": user_localpart},
- updatevalues={"avatar_url": new_avatar_url},
+ values={"avatar_url": new_avatar_url, "batch": batchnum},
desc="set_profile_avatar_url",
+ lock=False, # we can do this because user_id has a unique index
+ )
+
+ def set_profile_active(self, user_localpart, active, hide, batchnum):
+ values = {"active": int(active), "batch": batchnum}
+ if not active and not hide:
+ # we are deactivating for real (not in hide mode)
+ # so clear the profile.
+ values["avatar_url"] = None
+ values["displayname"] = None
+ return self.db.simple_upsert(
+ table="profiles",
+ keyvalues={"user_id": user_localpart},
+ values=values,
+ desc="set_profile_active",
+ lock=False, # we can do this because user_id has a unique index
)
class ProfileStore(ProfileWorkerStore):
+ def __init__(self, database, db_conn, hs):
+ super(ProfileStore, self).__init__(database, db_conn, hs)
+
+ self.db.updates.register_background_index_update(
+ "profile_replication_status_host_index",
+ index_name="profile_replication_status_idx",
+ table="profile_replication_status",
+ columns=["host"],
+ unique=True,
+ )
+
def add_remote_profile_cache(self, user_id, displayname, avatar_url):
"""Ensure we are caching the remote user's profiles.
@@ -107,7 +189,7 @@ class ProfileStore(ProfileWorkerStore):
)
def update_remote_profile_cache(self, user_id, displayname, avatar_url):
- return self.db.simple_update(
+ return self.db.simple_upsert(
table="remote_profile_cache",
keyvalues={"user_id": user_id},
values={
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index 49306642ed..40f891cc64 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -158,6 +158,28 @@ class RegistrationWorkerStore(SQLBaseStore):
)
@defer.inlineCallbacks
+ def get_expired_users(self):
+ """Get IDs of all expired users
+
+ Returns:
+ Deferred[list[str]]: List of expired user IDs
+ """
+
+ def get_expired_users_txn(txn, now_ms):
+ sql = """
+ SELECT user_id from account_validity
+ WHERE expiration_ts_ms <= ?
+ """
+ txn.execute(sql, (now_ms,))
+ rows = txn.fetchall()
+ return [row[0] for row in rows]
+
+ res = yield self.db.runInteraction(
+ "get_expired_users", get_expired_users_txn, self.clock.time_msec()
+ )
+ defer.returnValue(res)
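+
+ # Illustrative usage (editor's sketch, not part of this change;
+ # `deactivate_account` stands in for a hypothetical handler call):
+ #
+ # expired = yield store.get_expired_users()
+ # for user_id in expired:
+ #     yield deactivate_account(user_id)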
+
+ @defer.inlineCallbacks
def set_renewal_token_for_user(self, user_id, renewal_token):
"""Defines a renewal token for a given user.
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 9a17e336ba..9c63ca5a20 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -295,6 +295,24 @@ class RoomWorkerStore(SQLBaseStore):
desc="is_room_blocked",
)
+ @defer.inlineCallbacks
+ def is_room_published(self, room_id):
+ """Check whether a room has been published in the local public room
+ directory.
+
+ Args:
+ room_id (str)
+ Returns:
+ bool: Whether the room is currently published in the room directory
+ """
+ # Get room information
+ room_info = yield self.get_room(room_id)
+ if not room_info:
+ defer.returnValue(False)
+
+ # Check the is_public value
+ defer.returnValue(room_info.get("is_public", False))
+
async def get_rooms_paginate(
self,
start: int,
@@ -449,6 +467,11 @@ class RoomWorkerStore(SQLBaseStore):
Returns:
dict[int, int]: "min_lifetime" and "max_lifetime" for this room.
"""
+ # If the room retention feature is disabled, return a policy with no minimum
+ # and no maximum, so that we don't incorrectly filter out events when sending
+ # them to the client.
+ if not self.config.retention_enabled:
+ defer.returnValue({"min_lifetime": None, "max_lifetime": None})
def get_retention_policy_for_room_txn(txn):
txn.execute(
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres
new file mode 100644
index 0000000000..c601cff6de
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.postgres
@@ -0,0 +1,35 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- when we first added the room_version column, it was populated via a background
+-- update. We now need it to be populated before synapse starts, so we populate
+-- any remaining rows with a NULL room version now. For servers which have completed
+-- the background update, this will be pretty quick.
+
+-- the following query will set room_version to NULL if no create event is found for
+-- the room in current_state_events, and will set it to '1' if a create event with no
+-- room_version is found.
+
+UPDATE rooms SET room_version=(
+ SELECT COALESCE(json::json->'content'->>'room_version','1')
+ FROM current_state_events cse INNER JOIN event_json ej USING (event_id)
+ WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key=''
+) WHERE rooms.room_version IS NULL;
+
+-- we still allow the background update to complete: it has the useful side-effect of
+-- populating `rooms` with any missing rooms (based on the current_state_events table).
+
+-- see also rooms_version_column_2.sql.sqlite which has a copy of the above query, using
+-- sqlite syntax for the json extraction.
diff --git a/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite
new file mode 100644
index 0000000000..335c6f2074
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/57/rooms_version_column_2.sql.sqlite
@@ -0,0 +1,22 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- see rooms_version_column_2.sql.postgres for details of what's going on here.
+
+UPDATE rooms SET room_version=(
+ SELECT COALESCE(json_extract(ej.json, '$.content.room_version'), '1')
+ FROM current_state_events cse INNER JOIN event_json ej USING (event_id)
+ WHERE cse.room_id=rooms.room_id AND cse.type='m.room.create' AND cse.state_key=''
+) WHERE rooms.room_version IS NULL;
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
index 889a9a0ce4..20c5af2eb7 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
+++ b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.postgres
@@ -658,10 +658,19 @@ CREATE TABLE presence_stream (
+CREATE TABLE profile_replication_status (
+ host text NOT NULL,
+ last_synced_batch bigint NOT NULL
+);
+
+
+
CREATE TABLE profiles (
user_id text NOT NULL,
displayname text,
- avatar_url text
+ avatar_url text,
+ batch bigint,
+ active smallint DEFAULT 1 NOT NULL
);
@@ -1788,6 +1797,10 @@ CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id);
+CREATE INDEX profiles_batch_idx ON profiles USING btree (batch);
+
+
+
CREATE INDEX public_room_index ON rooms USING btree (is_public);
diff --git a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
index a0411ede7e..e28ec3fa45 100644
--- a/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
+++ b/synapse/storage/data_stores/main/schema/full_schemas/54/full.sql.sqlite
@@ -6,7 +6,7 @@ CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_us
CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) );
CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) );
CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL );
-CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) );
+CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, batch BIGINT DEFAULT NULL, active SMALLINT DEFAULT 1 NOT NULL, UNIQUE(user_id) );
CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) );
CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER );
CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) );
@@ -202,6 +202,8 @@ CREATE INDEX group_users_u_idx ON group_users(user_id);
CREATE INDEX group_invites_u_idx ON group_invites(user_id);
CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id);
CREATE INDEX group_rooms_r_idx ON group_rooms(room_id);
+CREATE INDEX profiles_batch_idx ON profiles(batch);
+CREATE TABLE profile_replication_status ( host TEXT NOT NULL, last_synced_batch BIGINT NOT NULL );
CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL );
CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp);
CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp);
diff --git a/synapse/storage/data_stores/main/stats.py b/synapse/storage/data_stores/main/stats.py
index 7af1495e47..380c1ec7da 100644
--- a/synapse/storage/data_stores/main/stats.py
+++ b/synapse/storage/data_stores/main/stats.py
@@ -271,31 +271,6 @@ class StatsStore(StateDeltasStore):
return slice_list
- def get_room_stats_state(self, room_id):
- """
- Returns the current room_stats_state for a room.
-
- Args:
- room_id (str): The ID of the room to return state for.
-
- Returns (dict):
- Dictionary containing these keys:
- "name", "topic", "canonical_alias", "avatar", "join_rules",
- "history_visibility"
- """
- return self.db.simple_select_one(
- "room_stats_state",
- {"room_id": room_id},
- retcols=(
- "name",
- "topic",
- "canonical_alias",
- "avatar",
- "join_rules",
- "history_visibility",
- ),
- )
-
@cached()
def get_earliest_token_for_stats(self, stats_type, id):
"""
diff --git a/synapse/storage/data_stores/main/user_directory.py b/synapse/storage/data_stores/main/user_directory.py
index 90c180ec6d..6b8130bf0f 100644
--- a/synapse/storage/data_stores/main/user_directory.py
+++ b/synapse/storage/data_stores/main/user_directory.py
@@ -183,7 +183,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
return 1
- logger.info(
+ logger.debug(
"Processing the next %d rooms of %d remaining"
% (len(rooms_to_work_on), progress["remaining"])
)
@@ -308,7 +308,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
)
return 1
- logger.info(
+ logger.debug(
"Processing the next %d users of %d remaining"
% (len(users_to_work_on), progress["remaining"])
)
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 1003dd84a5..3eeb2f7c04 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -343,7 +343,7 @@ class Database(object):
top_three_counters = self._txn_perf_counters.interval(duration, limit=3)
- perf_logger.info(
+ perf_logger.debug(
"Total database time: %.3f%% {%s}", ratio * 100, top_three_counters
)
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index af3fd67ab9..a5370ed527 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -390,7 +390,7 @@ class EventsPersistenceStorage(object):
state_delta_reuse_delta_counter.inc()
break
- logger.info("Calculating state delta for room %s", room_id)
+ logger.debug("Calculating state delta for room %s", room_id)
with Measure(
self._clock, "persist_events.get_new_state_after_events"
):
diff --git a/synapse/storage/schema/delta/48/profiles_batch.sql b/synapse/storage/schema/delta/48/profiles_batch.sql
new file mode 100644
index 0000000000..e744c02fe8
--- /dev/null
+++ b/synapse/storage/schema/delta/48/profiles_batch.sql
@@ -0,0 +1,36 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Add a batch number to track changes to profiles and the
+ * order they're made in so we can replicate user profiles
+ * to other hosts as they change
+ */
+ALTER TABLE profiles ADD COLUMN batch BIGINT DEFAULT NULL;
+
+/*
+ * Index on the batch number so we can get profiles
+ * by their batch
+ */
+CREATE INDEX profiles_batch_idx ON profiles(batch);
+
+/*
+ * A table to track what batch of user profiles has been
+ * synced to what profile replication target.
+ */
+CREATE TABLE profile_replication_status (
+ host TEXT NOT NULL,
+ last_synced_batch BIGINT NOT NULL
+);
diff --git a/synapse/storage/schema/delta/50/profiles_deactivated_users.sql b/synapse/storage/schema/delta/50/profiles_deactivated_users.sql
new file mode 100644
index 0000000000..c8893ecbe8
--- /dev/null
+++ b/synapse/storage/schema/delta/50/profiles_deactivated_users.sql
@@ -0,0 +1,23 @@
+/* Copyright 2018 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * A flag saying whether the user owning the profile has been deactivated.
+ * This really belongs on the users table, not here, but the users table
+ * stores users by their full user_id and profiles stores them by localpart,
+ * so we can't easily join between the two tables. Plus, the batch number
+ * really ought to represent data in this table that has changed.
+ */
+ALTER TABLE profiles ADD COLUMN active SMALLINT DEFAULT 1 NOT NULL;
diff --git a/synapse/storage/schema/delta/55/profile_replication_status_index.sql b/synapse/storage/schema/delta/55/profile_replication_status_index.sql
new file mode 100644
index 0000000000..18a0f7e10c
--- /dev/null
+++ b/synapse/storage/schema/delta/55/profile_replication_status_index.sql
@@ -0,0 +1,17 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+ ('profile_replication_status_host_index', '{}');
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql b/synapse/storage/schema/delta/55/room_retention.sql
index ee6cdf7a14..ee6cdf7a14 100644
--- a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
+++ b/synapse/storage/schema/delta/55/room_retention.sql
diff --git a/synapse/third_party_rules/access_rules.py b/synapse/third_party_rules/access_rules.py
new file mode 100644
index 0000000000..253bba664b
--- /dev/null
+++ b/synapse/third_party_rules/access_rules.py
@@ -0,0 +1,586 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import email.utils
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, Membership, RoomCreationPreset
+from synapse.api.errors import SynapseError
+from synapse.config._base import ConfigError
+from synapse.types import get_domain_from_id
+
+ACCESS_RULES_TYPE = "im.vector.room.access_rules"
+ACCESS_RULE_RESTRICTED = "restricted"
+ACCESS_RULE_UNRESTRICTED = "unrestricted"
+ACCESS_RULE_DIRECT = "direct"
+
+VALID_ACCESS_RULES = (
+ ACCESS_RULE_DIRECT,
+ ACCESS_RULE_RESTRICTED,
+ ACCESS_RULE_UNRESTRICTED,
+)
+
+# Rules to which we need to apply the power levels restrictions.
+#
+# These are all of the rules that neither:
+# * forbid users from joining based on a server blacklist (which means that there
+# is no need to apply power level restrictions), nor
+# * target direct chats (since we allow both users to be room admins in this case).
+#
+# The power-level restrictions, when they are applied, prevent the following:
+# * the default power level for users (users_default) being set to anything other than 0.
+# * a non-default power level being assigned to any user which would be forbidden from
+# joining a restricted room.
+RULES_WITH_RESTRICTED_POWER_LEVELS = (ACCESS_RULE_UNRESTRICTED,)
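+
+# For example (editor's illustration, not part of this change; the domain is
+# hypothetical): with "forbidden.example.com" listed in
+# domains_forbidden_when_restricted, an "unrestricted" room would reject a
+# power_levels content such as:
+#
+# {"users_default": 0, "users": {"@bob:forbidden.example.com": 50}}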
+
+
+class RoomAccessRules(object):
+ """Implementation of the ThirdPartyEventRules module API that allows federation admins
+ to define custom rules for specific events and actions.
+ Implements the custom behaviour for the "im.vector.room.access_rules" state event.
+
+ Takes a config in the format:
+
+ third_party_event_rules:
+ module: third_party_rules.RoomAccessRules
+ config:
+ # List of domains (server names) that can't be invited to rooms if the
+ # "restricted" rule is set. Defaults to an empty list.
+ domains_forbidden_when_restricted: []
+
+ # Identity server to use when checking the HS an email address belongs to
+ # using the /info endpoint. Required.
+ id_server: "vector.im"
+
+ Don't forget to consider whether you can invite users from your own domain.
+ """
+
+ def __init__(self, config, http_client):
+ self.http_client = http_client
+
+ self.id_server = config["id_server"]
+
+ self.domains_forbidden_when_restricted = config.get(
+ "domains_forbidden_when_restricted", []
+ )
+
+ @staticmethod
+ def parse_config(config):
+ if "id_server" in config:
+ return config
+ else:
+ raise ConfigError("No IS for event rules TchapEventRules")
+
+ def on_create_room(self, requester, config, is_requester_admin):
+ """Implements synapse.events.ThirdPartyEventRules.on_create_room
+
+ Checks if an im.vector.room.access_rules event is being set during room creation.
+ If yes, make sure the event is correct. Otherwise, append an event with the
+ default rule to the initial state.
+ """
+ is_direct = config.get("is_direct")
+ preset = config.get("preset")
+ access_rule = None
+ join_rule = None
+
+ # If there's a rules event in the initial state, check if it complies with the
+ # spec for im.vector.room.access_rules and deny the request if not.
+ for event in config.get("initial_state", []):
+ if event["type"] == ACCESS_RULES_TYPE:
+ access_rule = event["content"].get("rule")
+
+ # Make sure the event has a valid content.
+ if access_rule is None:
+ raise SynapseError(400, "Invalid access rule")
+
+ # Make sure the rule name is valid.
+ if access_rule not in VALID_ACCESS_RULES:
+ raise SynapseError(400, "Invalid access rule")
+
+ # Make sure the rule is "direct" if the room is a direct chat.
+ if (is_direct and access_rule != ACCESS_RULE_DIRECT) or (
+ access_rule == ACCESS_RULE_DIRECT and not is_direct
+ ):
+ raise SynapseError(400, "Invalid access rule")
+
+ if event["type"] == EventTypes.JoinRules:
+ join_rule = event["content"].get("join_rule")
+
+ if access_rule is None:
+ # If there's no access rules event in the initial state, create one with the
+ # default setting.
+ if is_direct:
+ default_rule = ACCESS_RULE_DIRECT
+ else:
+ # If the default value for non-direct chats changes, we should add another
+ # case here for rooms created with either a "public" join_rule or the
+ # "public_chat" preset, to make sure those keep defaulting to "restricted".
+ default_rule = ACCESS_RULE_RESTRICTED
+
+ if not config.get("initial_state"):
+ config["initial_state"] = []
+
+ config["initial_state"].append(
+ {
+ "type": ACCESS_RULES_TYPE,
+ "state_key": "",
+ "content": {"rule": default_rule},
+ }
+ )
+
+ access_rule = default_rule
+
+ # Check that the preset or the join rule in use is compatible with the access
+ # rule, whether it's a user-defined one or the default one (i.e. if it involves
+ # a "public" join rule, the access rule must be "restricted").
+ if (
+ join_rule == JoinRules.PUBLIC or preset == RoomCreationPreset.PUBLIC_CHAT
+ ) and access_rule != ACCESS_RULE_RESTRICTED:
+ raise SynapseError(400, "Invalid access rule")
+
+ # Check if the creator can override values for the power levels.
+ allowed = self._is_power_level_content_allowed(
+ config.get("power_level_content_override", {}), access_rule
+ )
+ if not allowed:
+ raise SynapseError(400, "Invalid power levels content override")
+
+ # Second loop for events we need to know the current rule to process.
+ for event in config.get("initial_state", []):
+ if event["type"] == EventTypes.PowerLevels:
+ allowed = self._is_power_level_content_allowed(
+ event["content"], access_rule
+ )
+ if not allowed:
+ raise SynapseError(400, "Invalid power levels content")
+
+ @defer.inlineCallbacks
+ def check_threepid_can_be_invited(self, medium, address, state_events):
+ """Implements synapse.events.ThirdPartyEventRules.check_threepid_can_be_invited
+
+ Check if a threepid can be invited to the room via a 3PID invite given the current
+ rules and the threepid's address, by retrieving the HS it's mapped to from the
+ configured identity server, and checking if we can invite users from it.
+ """
+ rule = self._get_rule_from_state(state_events)
+
+ if medium != "email":
+ defer.returnValue(False)
+
+ if rule != ACCESS_RULE_RESTRICTED:
+ # Only "restricted" requires filtering 3PID invites. We don't need to do
+ # anything for "direct" here, because only "restricted" requires filtering
+ # based on the HS the address is mapped to.
+ defer.returnValue(True)
+
+ parsed_address = email.utils.parseaddr(address)[1]
+ if parsed_address != address:
+ # Avoid reproducing the security issue described here:
+ # https://matrix.org/blog/2019/04/18/security-update-sydent-1-0-2
+ # It's probably not worth it but let's just be overly safe here.
+ defer.returnValue(False)
+
+ # Get the HS this address belongs to from the identity server.
+ res = yield self.http_client.get_json(
+ "https://%s/_matrix/identity/api/v1/info" % (self.id_server,),
+ {"medium": medium, "address": address},
+ )
+
+ # Deny the invite if the address doesn't map to a HS, or if the HS it maps to
+ # is forbidden when the room is restricted.
+ if not res.get("hs"):
+ defer.returnValue(False)
+ if res.get("hs") in self.domains_forbidden_when_restricted:
+ defer.returnValue(False)
+
+ defer.returnValue(True)
+
+ def check_event_allowed(self, event, state_events):
+ """Implements synapse.events.ThirdPartyEventRules.check_event_allowed
+
+ Checks the event's type and the current rule and calls the right function to
+ determine whether the event can be allowed.
+ """
+ if event.type == ACCESS_RULES_TYPE:
+ return self._on_rules_change(event, state_events)
+
+ # We need to know the rule to apply when processing the event types below.
+ rule = self._get_rule_from_state(state_events)
+
+ if event.type == EventTypes.PowerLevels:
+ return self._is_power_level_content_allowed(event.content, rule)
+
+ if event.type == EventTypes.Member or event.type == EventTypes.ThirdPartyInvite:
+ return self._on_membership_or_invite(event, rule, state_events)
+
+ if event.type == EventTypes.JoinRules:
+ return self._on_join_rule_change(event, rule)
+
+ if event.type == EventTypes.RoomAvatar:
+ return self._on_room_avatar_change(event, rule)
+
+ if event.type == EventTypes.Name:
+ return self._on_room_name_change(event, rule)
+
+ if event.type == EventTypes.Topic:
+ return self._on_room_topic_change(event, rule)
+
+ return True
+
+ def _on_rules_change(self, event, state_events):
+ """Implement the checks and behaviour specified on allowing or forbidding a new
+ im.vector.room.access_rules event.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ state_events (dict[tuple[event type, state key], EventBase]): The state of the
+ room before the event was sent.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ new_rule = event.content.get("rule")
+
+ # Check for invalid values.
+ if new_rule not in VALID_ACCESS_RULES:
+ return False
+
+ # We must not allow rooms with the "public" join rule to be given any other access
+ # rule than "restricted".
+ join_rule = self._get_join_rule_from_state(state_events)
+ if join_rule == JoinRules.PUBLIC and new_rule != ACCESS_RULE_RESTRICTED:
+ return False
+
+ # Make sure we don't apply "direct" if the room has more than two members.
+ if new_rule == ACCESS_RULE_DIRECT:
+ existing_members, threepid_tokens = self._get_members_and_tokens_from_state(
+ state_events
+ )
+
+ if len(existing_members) > 2 or len(threepid_tokens) > 1:
+ return False
+
+ prev_rules_event = state_events.get((ACCESS_RULES_TYPE, ""))
+
+ # Now that we know the new rule doesn't break the "direct" case, we can allow any
+ # new rule in rooms that had none before.
+ if prev_rules_event is None:
+ return True
+
+ prev_rule = prev_rules_event.content.get("rule")
+
+ # Currently, we can only go from "restricted" to "unrestricted".
+ if prev_rule == ACCESS_RULE_RESTRICTED and new_rule == ACCESS_RULE_UNRESTRICTED:
+ return True
+
+ return False
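+
+ # To summarise (editor's note): for a room that already has a rules event,
+ # the only change ever allowed is "restricted" -> "unrestricted"; every other
+ # transition, including reverting to "restricted", is denied.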
+
+ def _on_membership_or_invite(self, event, rule, state_events):
+ """Applies the correct rule for incoming m.room.member and
+ m.room.third_party_invite events.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ state_events (dict[tuple[event type, state key], EventBase]): The state of the
+ room before the event was sent.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ if rule == ACCESS_RULE_RESTRICTED:
+ ret = self._on_membership_or_invite_restricted(event)
+ elif rule == ACCESS_RULE_UNRESTRICTED:
+ ret = self._on_membership_or_invite_unrestricted()
+ elif rule == ACCESS_RULE_DIRECT:
+ ret = self._on_membership_or_invite_direct(event, state_events)
+ else:
+ # We currently apply the default (restricted) if we don't know the rule, we
+ # might want to change that in the future.
+ ret = self._on_membership_or_invite_restricted(event)
+
+ return ret
+
+ def _on_membership_or_invite_restricted(self, event):
+ """Implements the checks and behaviour specified for the "restricted" rule.
+
+ "restricted" currently means that users can only invite users if their server is
+ included in a limited list of domains.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ # We're not applying the rules on m.room.third_party_member events here because
+ # the filtering on threepids is done in check_threepid_can_be_invited, which is
+ # called before check_event_allowed.
+ if event.type == EventTypes.ThirdPartyInvite:
+ return True
+
+ # We only need to process "join" and "invite" memberships, in order to be backward
+ # compatible, e.g. if a user from a blacklisted server joined a restricted room
+ # before the rules started being enforced on the server, that user must be able to
+ # leave it.
+ if event.membership not in [Membership.JOIN, Membership.INVITE]:
+ return True
+
+ invitee_domain = get_domain_from_id(event.state_key)
+ return invitee_domain not in self.domains_forbidden_when_restricted
+
+ def _on_membership_or_invite_unrestricted(self):
+ """Implements the checks and behaviour specified for the "unrestricted" rule.
+
+ "unrestricted" currently means that every event is allowed.
+
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return True
+
+ def _on_membership_or_invite_direct(self, event, state_events):
+ """Implements the checks and behaviour specified for the "direct" rule.
+
+ "direct" currently means that no member is allowed apart from the two initial
+ members the room was created for (i.e. the room's creator and their first
+ invitee).
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ state_events (dict[tuple[event type, state key], EventBase]): The state of the
+ room before the event was sent.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ # Get the room memberships and 3PID invite tokens from the room's state.
+ existing_members, threepid_tokens = self._get_members_and_tokens_from_state(
+ state_events
+ )
+
+ # There should never be more than one 3PID invite in the room state: if the
+ # second original user came and left, and we're inviting them using their email
+ # address, given we know they have a Matrix account bound to the address (so
+ # they could join the first time), Synapse will successfully look the address up
+ # before attempting to store an invite on the IS.
+ if len(threepid_tokens) == 1 and event.type == EventTypes.ThirdPartyInvite:
+ # If we already have a 3PID invite in flight, don't accept another one, unless
+ # the new one has the same invite token as its state key. This is because 3PID
+ # invite revocations must be allowed, and a revocation is basically a new 3PID
+ # invite event with an empty content and the same token as the invite it
+ # revokes.
+ return event.state_key in threepid_tokens
+
+ if len(existing_members) == 2:
+ # If the user was one of the two initial users of the room, Synapse would have
+ # looked them up successfully and thus sent an m.room.member event here instead
+ # of an m.room.third_party_invite one.
+ if event.type == EventTypes.ThirdPartyInvite:
+ return False
+
+ # We can only have m.room.member events here. The rule in this case is to only
+ # allow the event if its target is one of the initial two members in the room,
+ # i.e. the state key of one of the two m.room.member states in the room.
+ return event.state_key in existing_members
+
+ # We're alone in the room (and always have been) and there's one 3PID invite in
+ # flight.
+ if len(existing_members) == 1 and len(threepid_tokens) == 1:
+ # We can only have m.room.member events here. In this case, we can only allow
+ # the event if it's either an m.room.member event from the joined user (we can
+ # assume that the only m.room.member event is a join, otherwise we wouldn't be
+ # able to send an event to the room) or an invite event whose target is the
+ # invited user.
+ target = event.state_key
+ is_from_threepid_invite = self._is_invite_from_threepid(
+ event, threepid_tokens[0]
+ )
+ if is_from_threepid_invite or target == existing_members[0]:
+ return True
+
+ return False
+
+ return True
+
+ def _is_power_level_content_allowed(self, content, access_rule):
+ """Check if a given power levels event is permitted under the given access rule.
+
+ It shouldn't be allowed if it either changes the default PL to a non-0 value or
+ gives a non-0 PL to a user that would have been forbidden from joining the room
+ under a more restrictive access rule.
+
+ Args:
+ content (dict): The content of the m.room.power_levels event to check.
+ access_rule (str): The access rule in place in this room.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ # Check if we need to apply the restrictions with the current rule.
+ if access_rule not in RULES_WITH_RESTRICTED_POWER_LEVELS:
+ return True
+
+ # If users_default is explicitly set to a non-0 value, deny the event.
+ users_default = content.get("users_default", 0)
+ if users_default:
+ return False
+
+ users = content.get("users", {})
+ for user_id, power_level in users.items():
+ server_name = get_domain_from_id(user_id)
+ # Check the domain against the blacklist. If found, and the PL isn't 0, deny
+ # the event.
+ if (
+ server_name in self.domains_forbidden_when_restricted
+ and power_level != 0
+ ):
+ return False
+
+ return True
+
+ def _on_join_rule_change(self, event, rule):
+ """Check whether a join rule change is allowed. A join rule change is always
+ allowed unless the new join rule is "public" and the current access rule isn't
+ "restricted".
+ The rationale is that external users (those whose server would be denied access
+ to rooms enforcing the "restricted" access rule) should always rely on non-
+ external users for access to rooms, therefore they shouldn't be able to access
+ rooms that don't require an invite to be joined.
+
+ Note that we currently rely on the default access rule being "restricted": during
+ room creation, the m.room.join_rules event will be sent *before* the
+ im.vector.room.access_rules one, so the access rule that will be considered here
+ in this case will be the default "restricted" one. This is fine since the
+ "restricted" access rule allows any value for the join rule, but we should keep
+ that in mind if we need to change the default access rule in the future.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ if event.content.get("join_rule") == JoinRules.PUBLIC:
+ return rule == ACCESS_RULE_RESTRICTED
+
+ return True
+
+ def _on_room_avatar_change(self, event, rule):
+ """Check whether a change of room avatar is allowed.
+ The current rule is to forbid such a change in direct chats but allow it
+ everywhere else.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return rule != ACCESS_RULE_DIRECT
+
+ def _on_room_name_change(self, event, rule):
+ """Check whether a change of room name is allowed.
+ The current rule is to forbid such a change in direct chats but allow it
+ everywhere else.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return rule != ACCESS_RULE_DIRECT
+
+ def _on_room_topic_change(self, event, rule):
+ """Check whether a change of room topic is allowed.
+ The current rule is to forbid such a change in direct chats but allow it
+ everywhere else.
+
+ Args:
+ event (synapse.events.EventBase): The event to check.
+ rule (str): The name of the rule to apply.
+ Returns:
+ bool, True if the event can be allowed, False otherwise.
+ """
+ return rule != ACCESS_RULE_DIRECT
+
+ @staticmethod
+ def _get_rule_from_state(state_events):
+ """Extract the rule to be applied from the given set of state events.
+
+ Args:
+ state_events (dict[tuple[event type, state key], EventBase]): The set of state
+ events.
+ Returns:
+ str, the name of the rule (either "direct", "restricted" or "unrestricted")
+ """
+ access_rules = state_events.get((ACCESS_RULES_TYPE, ""))
+ if access_rules is None:
+ rule = ACCESS_RULE_RESTRICTED
+ else:
+ rule = access_rules.content.get("rule")
+ return rule
+
+ @staticmethod
+ def _get_join_rule_from_state(state_events):
+ """Extract the room's join rule from the given set of state events.
+
+ Args:
+ state_events (dict[tuple[event type, state key], EventBase]): The set of state
+ events.
+ Returns:
+ str, the name of the join rule (either "public" or "invite"), or None if
+ the room has no join rules event in its state
+ """
+ join_rule_event = state_events.get((EventTypes.JoinRules, ""))
+ if join_rule_event is None:
+ return None
+ return join_rule_event.content.get("join_rule")
+
+ @staticmethod
+ def _get_members_and_tokens_from_state(state_events):
+ """Retrieves from a list of state events the list of users that have a
+ m.room.member event in the room, and the tokens of 3PID invites in the room.
+
+ Args:
+ state_events (dict[tuple[event type, state key], EventBase]): The set of state
+ events.
+ Returns:
+ existing_members (list[str]): List of targets of the m.room.member events in
+ the state.
+ threepid_invite_tokens (list[str]): List of tokens of the 3PID invites in the
+ state.
+ """
+ existing_members = []
+ threepid_invite_tokens = []
+ for key, state_event in state_events.items():
+ if key[0] == EventTypes.Member and state_event.content:
+ existing_members.append(state_event.state_key)
+ if key[0] == EventTypes.ThirdPartyInvite and state_event.content:
+ # Don't include revoked invites.
+ threepid_invite_tokens.append(state_event.state_key)
+
+ return existing_members, threepid_invite_tokens
+
+ @staticmethod
+ def _is_invite_from_threepid(invite, threepid_invite_token):
+ """Checks whether the given invite follows the given 3PID invite.
+
+ Args:
+ invite (EventBase): The m.room.member event with "invite" membership.
+ threepid_invite_token (str): The state key from the 3PID invite.
+ Returns:
+ bool, True if the invite follows the given 3PID invite, False otherwise.
+ """
+ token = (
+ invite.content.get("third_party_invite", {})
+ .get("signed", {})
+ .get("token", "")
+ )
+
+ return token == threepid_invite_token
diff --git a/synapse/types.py b/synapse/types.py
index f3cd465735..16a7f87011 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -19,6 +19,8 @@ import sys
from collections import namedtuple
from typing import Any, Dict, Tuple, TypeVar
+from six.moves import filter
+
import attr
from signedjson.key import decode_verify_key_bytes
from unpaddedbase64 import decode_base64
@@ -262,6 +264,19 @@ def contains_invalid_mxid_characters(localpart):
return any(c not in mxid_localpart_allowed_characters for c in localpart)
+def strip_invalid_mxid_characters(localpart):
+ """Removes any invalid characters from an mxid
+
+ Args:
+ localpart (basestring): the localpart to be stripped
+
+ Returns:
+ localpart (basestring): the localpart having been stripped
+ """
+ filtered = filter(lambda c: c in mxid_localpart_allowed_characters, localpart)
+ return "".join(filtered)
+
+
UPPER_CASE_PATTERN = re.compile(b"[A-Z_]")
# the following is a pattern which matches '=', and bytes which are not allowed in a mxid
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 04b6abdc24..581dffd8a0 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -73,6 +73,10 @@ class ObservableDeferred(object):
def errback(f):
object.__setattr__(self, "_result", (False, f))
while self._observers:
+ # This is a little bit of magic to correctly propagate stack
+ # traces when we `await` on one of the observer deferreds.
+ f.value.__failure__ = f
+
try:
# TODO: Handle errors here.
self._observers.pop().errback(f)
diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py
index 82d3eefe0e..b68f9fe0d4 100644
--- a/synapse/util/caches/response_cache.py
+++ b/synapse/util/caches/response_cache.py
@@ -144,7 +144,7 @@ class ResponseCache(object):
"""
result = self.get(key)
if not result:
- logger.info(
+ logger.debug(
"[%s]: no cached result for [%s], calculating new one", self._name, key
)
d = run_in_background(callback, *args, **kwargs)
diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
index 3ec1dfb0c2..34ce7cac16 100644
--- a/synapse/util/threepids.py
+++ b/synapse/util/threepids.py
@@ -16,11 +16,14 @@
import logging
import re
+from twisted.internet import defer
+
logger = logging.getLogger(__name__)
+@defer.inlineCallbacks
def check_3pid_allowed(hs, medium, address):
- """Checks whether a given format of 3PID is allowed to be used on this HS
+ """Checks whether a given 3PID is allowed to be used on this HS
Args:
hs (synapse.server.HomeServer): server
@@ -28,9 +31,36 @@ def check_3pid_allowed(hs, medium, address):
address (str): address within that medium (e.g. "wotan@matrix.org")
msisdns need to first have been canonicalised
Returns:
- bool: whether the 3PID medium/address is allowed to be added to this HS
+ Deferred[bool]: whether the 3PID medium/address is allowed to be added to this HS
"""
+ if hs.config.check_is_for_allowed_local_3pids:
+ data = yield hs.get_simple_http_client().get_json(
+ "https://%s%s"
+ % (
+ hs.config.check_is_for_allowed_local_3pids,
+ "/_matrix/identity/api/v1/internal-info",
+ ),
+ {"medium": medium, "address": address},
+ )
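+
+ # The expected response shape (a hedged, illustrative example only):
+ # {"hs": "example.com", "shadow_hs": "shadow.example.com",
+ # "requires_invite": True, "invited": False}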
+
+ # Check for invalid response
+ if "hs" not in data and "shadow_hs" not in data:
+ defer.returnValue(False)
+
+ # Check if this user is intended to register for this homeserver
+ if (
+ data.get("hs") != hs.config.server_name
+ and data.get("shadow_hs") != hs.config.server_name
+ ):
+ defer.returnValue(False)
+
+ if data.get("requires_invite", False) and not data.get("invited", False):
+ # Requires an invite but hasn't been invited
+ defer.returnValue(False)
+
+ defer.returnValue(True)
+
if hs.config.allowed_local_3pids:
for constraint in hs.config.allowed_local_3pids:
logger.debug(
@@ -43,8 +73,8 @@ def check_3pid_allowed(hs, medium, address):
if medium == constraint["medium"] and re.match(
constraint["pattern"], address
):
- return True
+ defer.returnValue(True)
else:
- return True
+ defer.returnValue(True)
- return False
+ defer.returnValue(False)
diff --git a/sytest-blacklist b/sytest-blacklist
index 79b2d4402a..fd50197b13 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -36,3 +36,24 @@ Inbound federation of state requires event_id as a mandatory paramater
# Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands
Can upload self-signing keys
+
+# flaky test
+If remote user leaves room we no longer receive device updates
+
+# flaky test
+Can re-join room if re-invited
+
+# flaky test
+Forgotten room messages cannot be paginated
+
+# flaky test
+Local device key changes get to remote servers
+
+# flaky test
+Old leaves are present in gapped incremental syncs
+
+# flaky test on workers
+Old members are included in gappy incr LL sync if they start speaking
+
+# flaky test on workers
+Presence changes to UNAVAILABLE are reported to remote room members
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 63d8633582..4e67503cf0 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -25,7 +25,7 @@ from twisted.internet import defer
from synapse.api.constants import EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from tests import unittest
from tests.utils import DeferredMockCallable, MockHttpResource, setup_test_homeserver
@@ -38,7 +38,7 @@ def MockEvent(**kwargs):
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
- return FrozenEvent(kwargs)
+ return make_event_from_dict(kwargs)
class FilteringTestCase(unittest.TestCase):
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index 6143a50ab2..62f639a18d 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -19,7 +19,7 @@ from unpaddedbase64 import decode_base64
from synapse.api.room_versions import RoomVersions
from synapse.crypto.event_signing import add_hashes_and_signatures
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from tests import unittest
@@ -54,7 +54,7 @@ class EventSigningTestCase(unittest.TestCase):
RoomVersions.V1, event_dict, HOSTNAME, self.signing_key
)
- event = FrozenEvent(event_dict)
+ event = make_event_from_dict(event_dict)
self.assertTrue(hasattr(event, "hashes"))
self.assertIn("sha256", event.hashes)
@@ -88,7 +88,7 @@ class EventSigningTestCase(unittest.TestCase):
RoomVersions.V1, event_dict, HOSTNAME, self.signing_key
)
- event = FrozenEvent(event_dict)
+ event = make_event_from_dict(event_dict)
self.assertTrue(hasattr(event, "hashes"))
self.assertIn("sha256", event.hashes)
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 2b13980dfd..45d55b9e94 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -13,8 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from synapse.events.utils import (
copy_power_levels_contents,
prune_event,
@@ -30,7 +29,7 @@ def MockEvent(**kwargs):
kwargs["event_id"] = "fake_event_id"
if "type" not in kwargs:
kwargs["type"] = "fake_type"
- return FrozenEvent(kwargs)
+ return make_event_from_dict(kwargs)
class PruneEventTestCase(unittest.TestCase):
@@ -38,7 +37,9 @@ class PruneEventTestCase(unittest.TestCase):
`matchdict` when it is redacted. """
def run_test(self, evdict, matchdict):
- self.assertEquals(prune_event(FrozenEvent(evdict)).get_dict(), matchdict)
+ self.assertEquals(
+ prune_event(make_event_from_dict(evdict)).get_dict(), matchdict
+ )
def test_minimal(self):
self.run_test(
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 1ec8c40901..e7d8699040 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -15,7 +15,7 @@
# limitations under the License.
import logging
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from synapse.federation.federation_server import server_matches_acl_event
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
@@ -105,7 +105,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase):
def _create_acl_event(content):
- return FrozenEvent(
+ return make_event_from_dict(
{
"room_id": "!a:b",
"event_id": "$a:b",
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index a3aa0a1cf2..62b47f6574 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -160,6 +160,24 @@ class DeviceTestCase(unittest.HomeserverTestCase):
res = self.get_success(self.handler.get_device(user1, "abc"))
self.assertEqual(res["display_name"], "new display")
+ def test_update_device_too_long_display_name(self):
+ """Update a device with a display name that is invalid (too long)."""
+ self._record_users()
+
+ # Request to update a device display name with a new value that is longer than allowed.
+ update = {
+ "display_name": "a"
+ * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
+ }
+ self.get_failure(
+ self.handler.update_device(user1, "abc", update),
+ synapse.api.errors.SynapseError,
+ )
+
+ # Ensure the display name was not updated.
+ res = self.get_success(self.handler.get_device(user1, "abc"))
+ self.assertEqual(res["display_name"], "display 2")
+
def test_update_unknown_device(self):
update = {"display_name": "new_display"}
res = self.handler.update_device("user_id", "unknown_device_id", update)
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index b4d92cf732..132e35651d 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -99,6 +99,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(self.store.get_room_version(room_id))
# pretend that another server has joined
join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
@@ -120,7 +121,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
"auth_events": [],
"origin_server_ts": self.clock.time_msec(),
},
- join_event.format_version,
+ room_version,
)
with LoggingContext(request="send_rejected"):
@@ -149,6 +150,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
+ room_version = self.get_success(self.store.get_room_version(room_id))
# pretend that another server has joined
join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
@@ -171,7 +173,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
"auth_events": [],
"origin_server_ts": self.clock.time_msec(),
},
- join_event.format_version,
+ room_version,
)
with LoggingContext(request="send_rejected"):
diff --git a/tests/handlers/test_identity.py b/tests/handlers/test_identity.py
new file mode 100644
index 0000000000..34f6bfb422
--- /dev/null
+++ b/tests/handlers/test_identity.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from mock import Mock
+
+from twisted.internet import defer
+
+import synapse.rest.admin
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account
+
+from tests import unittest
+
+
+class ThreepidISRewrittenURLTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ account.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ self.address = "test@test"
+ self.is_server_name = "testis"
+ self.rewritten_is_url = "int.testis"
+
+ config = self.default_config()
+ config["trusted_third_party_id_servers"] = [self.is_server_name]
+ config["rewrite_identity_server_urls"] = {
+ self.is_server_name: self.rewritten_is_url
+ }
+
+ mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ mock_http_client.get_json.return_value = defer.succeed({})
+ mock_http_client.post_json_get_json.return_value = defer.succeed(
+ {"address": self.address, "medium": "email"}
+ )
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ mock_blacklisting_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ mock_blacklisting_http_client.get_json.return_value = defer.succeed({})
+ mock_blacklisting_http_client.post_json_get_json.return_value = defer.succeed(
+ {"address": self.address, "medium": "email"}
+ )
+
+ # TODO: This class does not use a singleton to get its http client
+ # This should be fixed for easier testing
+ # https://github.com/matrix-org/synapse-dinsic/issues/26
+ self.hs.get_handlers().identity_handler.blacklisting_http_client = (
+ mock_blacklisting_http_client
+ )
+
+ return self.hs
+
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("kermit", "monkey")
+
+ def test_rewritten_id_server(self):
+ """
+ Tests that, when validating a 3PID association while rewriting the IS's server
+ name:
+ * the bind request is done against the rewritten hostname
+ * the original, non-rewritten, server name is stored in the database
+ """
+ handler = self.hs.get_handlers().identity_handler
+ post_json_get_json = handler.blacklisting_http_client.post_json_get_json
+ store = self.hs.get_datastore()
+
+ creds = {"sid": "123", "client_secret": "some_secret"}
+
+ # Make sure processing the mocked response goes through.
+ data = self.get_success(
+ handler.bind_threepid(
+ client_secret=creds["client_secret"],
+ sid=creds["sid"],
+ mxid=self.user_id,
+ id_server=self.is_server_name,
+ use_v2=False,
+ )
+ )
+ self.assertEqual(data.get("address"), self.address)
+
+ # Check that the request was done against the rewritten server name.
+ post_json_get_json.assert_called_once_with(
+ "https://%s/_matrix/identity/api/v1/3pid/bind" % self.rewritten_is_url,
+ {
+ "sid": creds["sid"],
+ "client_secret": creds["client_secret"],
+ "mxid": self.user_id,
+ },
+ headers={},
+ )
+
+ # Check that the original server name is saved in the database instead of the
+ # rewritten one.
+ id_servers = self.get_success(
+ store.get_id_servers_user_bound(self.user_id, "email", self.address)
+ )
+ self.assertEqual(id_servers, [self.is_server_name])
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index d60c124eec..2311040201 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -67,13 +67,11 @@ class ProfileTestCase(unittest.TestCase):
self.bob = UserID.from_string("@4567:test")
self.alice = UserID.from_string("@alice:remote")
- yield self.store.create_profile(self.frank.localpart)
-
self.handler = hs.get_profile_handler()
@defer.inlineCallbacks
def test_get_my_name(self):
- yield self.store.set_profile_displayname(self.frank.localpart, "Frank")
+ yield self.store.set_profile_displayname(self.frank.localpart, "Frank", 1)
displayname = yield self.handler.get_displayname(self.frank)
@@ -116,8 +114,7 @@ class ProfileTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_incoming_fed_query(self):
- yield self.store.create_profile("caroline")
- yield self.store.set_profile_displayname("caroline", "Caroline")
+ yield self.store.set_profile_displayname("caroline", "Caroline", 1)
response = yield self.query_handlers["profile"](
{"user_id": "@caroline:test", "field": "displayname"}
@@ -128,7 +125,7 @@ class ProfileTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_get_my_avatar(self):
yield self.store.set_profile_avatar_url(
- self.frank.localpart, "http://my.server/me.png"
+ self.frank.localpart, "http://my.server/me.png", 1
)
avatar_url = yield self.handler.get_avatar_url(self.frank)
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index e2915eb7b1..5e7f14a3d5 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -20,6 +20,7 @@ from twisted.internet import defer
from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, ResourceLimitError, SynapseError
from synapse.handlers.register import RegistrationHandler
+from synapse.rest.client.v2_alpha.register import _map_email_to_displayname
from synapse.types import RoomAlias, UserID, create_requester
from .. import unittest
@@ -256,6 +257,26 @@ class RegistrationTestCase(unittest.HomeserverTestCase):
self.handler.register_user(localpart=invalid_user_id), SynapseError
)
+ def test_email_to_displayname_mapping(self):
+ """Test that custom emails are mapped to new user displaynames correctly"""
+ self._check_mapping(
+ "jack-phillips.rivers@big-org.com", "Jack-Phillips Rivers [Big-Org]"
+ )
+
+ self._check_mapping("bob.jones@matrix.org", "Bob Jones [Tchap Admin]")
+
+ self._check_mapping("bob-jones.blabla@gouv.fr", "Bob-Jones Blabla [Gouv]")
+
+ # Multibyte unicode characters
+ self._check_mapping(
+ "j\u030a\u0065an-poppy.seed@example.com",
+ "J\u030a\u0065an-Poppy Seed [Example]",
+ )
+
+ def _check_mapping(self, i, expected):
+ result = _map_email_to_displayname(i)
+ self.assertEqual(result, expected)
+
@defer.inlineCallbacks
def get_or_create_user(self, requester, localpart, displayname, password_hash=None):
"""Creates a new user if the user does not exist,
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index d9d312f0fb..8e6b0b7536 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -21,8 +21,12 @@ from tests import unittest
# The expected number of state events in a fresh public room.
EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM = 5
+
# The expected number of state events in a fresh private room.
-EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 6
+#
+# Note: we increase this by 1 on the dinsic branch as we send
+# a "im.vector.room.access_rules" state event into new private rooms
+EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM = 7
class StatsRoomTests(unittest.HomeserverTestCase):
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 68b9847bd2..2767b0497a 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -111,7 +111,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
retry_timings_res
)
- self.datastore.get_device_updates_by_remote.return_value = (0, [])
+ self.datastore.get_device_updates_by_remote.return_value = defer.succeed(
+ (0, [])
+ )
def get_received_txn_response(*args):
return defer.succeed(None)
@@ -144,7 +146,9 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
self.datastore.get_current_state_deltas.return_value = (0, None)
self.datastore.get_to_device_stream_token = lambda: 0
- self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
+ self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: defer.succeed(
+ ([], 0)
+ )
self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed(
None
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index b1b037006d..d31210fbe4 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -15,7 +15,7 @@ import logging
from canonicaljson import encode_canonical_json
-from synapse.events import FrozenEvent, _EventInternalMetadata
+from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.handlers.room import RoomEventSource
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -90,7 +90,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
msg_dict["content"] = {}
msg_dict["unsigned"]["redacted_by"] = redaction.event_id
msg_dict["unsigned"]["redacted_because"] = redaction
- redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict())
+ redacted = make_event_from_dict(
+ msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict()
+ )
self.check("get_event", [msg.event_id], redacted)
def test_backfilled_redactions(self):
@@ -110,7 +112,9 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
msg_dict["content"] = {}
msg_dict["unsigned"]["redacted_by"] = redaction.event_id
msg_dict["unsigned"]["redacted_because"] = redaction
- redacted = FrozenEvent(msg_dict, msg.internal_metadata.get_dict())
+ redacted = make_event_from_dict(
+ msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict()
+ )
self.check("get_event", [msg.event_id], redacted)
def test_invites(self):
@@ -345,7 +349,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
if redacts is not None:
event_dict["redacts"] = redacts
- event = FrozenEvent(event_dict, internal_metadata_dict=internal)
+ event = make_event_from_dict(event_dict, internal_metadata_dict=internal)
self.event_id += 1
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 8f09f51c61..2c9cbddeab 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -407,7 +407,13 @@ class UserRestTestCase(unittest.HomeserverTestCase):
"""
self.hs.config.registration_shared_secret = None
- body = json.dumps({"password": "abc123", "admin": True})
+ body = json.dumps(
+ {
+ "password": "abc123",
+ "admin": True,
+ "threepids": [{"medium": "email", "address": "bob@bob.bob"}],
+ }
+ )
# Create user
request, channel = self.make_request(
@@ -421,6 +427,8 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual(201, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
self.assertEqual("bob", channel.json_body["displayname"])
+ self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+ self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"])
# Get user
request, channel = self.make_request(
@@ -449,7 +457,13 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
# Modify user
- body = json.dumps({"displayname": "foobar", "deactivated": True})
+ body = json.dumps(
+ {
+ "displayname": "foobar",
+ "deactivated": True,
+ "threepids": [{"medium": "email", "address": "bob2@bob.bob"}],
+ }
+ )
request, channel = self.make_request(
"PUT",
@@ -461,8 +475,9 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
- self.assertEqual("foobar", channel.json_body["displayname"])
+ self.assertEqual(None, channel.json_body["displayname"]) # deactivating a user removes their displayname
self.assertEqual(True, channel.json_body["deactivated"])
+ # The user is deactivated; their threepids will have been deleted.
# Get user
request, channel = self.make_request(
@@ -472,7 +487,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual("@bob:test", channel.json_body["name"])
- self.assertEqual("foobar", channel.json_body["displayname"])
+ self.assertEqual(None, channel.json_body["displayname"])
self.assertEqual(1, channel.json_body["admin"])
self.assertEqual(0, channel.json_body["is_guest"])
self.assertEqual(1, channel.json_body["deactivated"])
diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index c973521907..e163a46f6b 100644
--- a/tests/rest/client/test_identity.py
+++ b/tests/rest/client/test_identity.py
@@ -15,15 +15,22 @@
import json
+from mock import Mock
+
+from twisted.internet import defer
+
import synapse.rest.admin
from synapse.rest.client.v1 import login, room
+from synapse.rest.client.v2_alpha import account
from tests import unittest
-class IdentityTestCase(unittest.HomeserverTestCase):
+class IdentityDisabledTestCase(unittest.HomeserverTestCase):
+ """Tests that 3PID lookup attempts fail when the HS's config disallows them."""
servlets = [
+ account.register_servlets,
synapse.rest.admin.register_servlets_for_client_rest_resource,
room.register_servlets,
login.register_servlets,
@@ -32,24 +39,113 @@ class IdentityTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
config = self.default_config()
+ config["trusted_third_party_id_servers"] = ["testis"]
config["enable_3pid_lookup"] = False
self.hs = self.setup_test_homeserver(config=config)
return self.hs
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("kermit", "monkey")
+ self.tok = self.login("kermit", "monkey")
+
+ def test_3pid_invite_disabled(self):
+ request, channel = self.make_request(
+ b"POST", "/createRoom", b"{}", access_token=self.tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+ room_id = channel.json_body["room_id"]
+
+ params = {
+ "id_server": "testis",
+ "medium": "email",
+ "address": "test@example.com",
+ }
+ request_data = json.dumps(params)
+ request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii")
+ request, channel = self.make_request(
+ b"POST", request_url, request_data, access_token=self.tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"403", channel.result)
+
def test_3pid_lookup_disabled(self):
- self.hs.config.enable_3pid_lookup = False
+ url = (
+ "/_matrix/client/unstable/account/3pid/lookup"
+ "?id_server=testis&medium=email&address=foo@bar.baz"
+ )
+ request, channel = self.make_request("GET", url, access_token=self.tok)
+ self.render(request)
+ self.assertEqual(channel.result["code"], b"403", channel.result)
- self.register_user("kermit", "monkey")
- tok = self.login("kermit", "monkey")
+ def test_3pid_bulk_lookup_disabled(self):
+ url = "/_matrix/client/unstable/account/3pid/bulk_lookup"
+ data = {
+ "id_server": "testis",
+ "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]],
+ }
+ request_data = json.dumps(data)
+ request, channel = self.make_request(
+ "POST", url, request_data, access_token=self.tok
+ )
+ self.render(request)
+ self.assertEqual(channel.result["code"], b"403", channel.result)
+
+
+class IdentityEnabledTestCase(unittest.HomeserverTestCase):
+ """Tests that 3PID lookup attempts succeed when the HS's config allows them."""
+ servlets = [
+ account.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["enable_3pid_lookup"] = True
+ config["trusted_third_party_id_servers"] = ["testis"]
+
+ mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ mock_http_client.get_json.return_value = defer.succeed((200, "{}"))
+ mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}"))
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ # TODO: This class does not use a singleton to get its http client
+ # This should be fixed for easier testing
+ # https://github.com/matrix-org/synapse-dinsic/issues/26
+ self.hs.get_handlers().identity_handler.http_client = mock_http_client
+
+ return self.hs
+
+ def prepare(self, reactor, clock, hs):
+ self.user_id = self.register_user("kermit", "monkey")
+ self.tok = self.login("kermit", "monkey")
+
+ def test_3pid_invite_enabled(self):
request, channel = self.make_request(
- b"POST", "/createRoom", b"{}", access_token=tok
+ b"POST", "/createRoom", b"{}", access_token=self.tok
)
self.render(request)
self.assertEquals(channel.result["code"], b"200", channel.result)
room_id = channel.json_body["room_id"]
+ # Replace the blacklisting SimpleHttpClient with our mock
+ self.hs.get_room_member_handler().simple_http_client = Mock(
+ spec=["get_json", "post_json_get_json"]
+ )
+ self.hs.get_room_member_handler().simple_http_client.get_json.return_value = (
+ defer.succeed((200, "{}"))
+ )
+
params = {
"id_server": "testis",
"medium": "email",
@@ -58,7 +154,44 @@ class IdentityTestCase(unittest.HomeserverTestCase):
request_data = json.dumps(params)
request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii")
request, channel = self.make_request(
- b"POST", request_url, request_data, access_token=tok
+ b"POST", request_url, request_data, access_token=self.tok
)
self.render(request)
- self.assertEquals(channel.result["code"], b"403", channel.result)
+
+ get_json = self.hs.get_handlers().identity_handler.http_client.get_json
+ get_json.assert_called_once_with(
+ "https://testis/_matrix/identity/api/v1/lookup",
+ {"address": "test@example.com", "medium": "email"},
+ )
+
+ def test_3pid_lookup_enabled(self):
+ url = (
+ "/_matrix/client/unstable/account/3pid/lookup"
+ "?id_server=testis&medium=email&address=foo@bar.baz"
+ )
+ request, channel = self.make_request("GET", url, access_token=self.tok)
+ self.render(request)
+
+ get_json = self.hs.get_simple_http_client().get_json
+ get_json.assert_called_once_with(
+ "https://testis/_matrix/identity/api/v1/lookup",
+ {"address": "foo@bar.baz", "medium": "email"},
+ )
+
+ def test_3pid_bulk_lookup_enabled(self):
+ url = "/_matrix/client/unstable/account/3pid/bulk_lookup"
+ data = {
+ "id_server": "testis",
+ "threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]],
+ }
+ request_data = json.dumps(data)
+ request, channel = self.make_request(
+ "POST", url, request_data, access_token=self.tok
+ )
+ self.render(request)
+
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+ post_json.assert_called_once_with(
+ "https://testis/_matrix/identity/api/v1/bulk_lookup",
+ {"threepids": [["email", "foo@bar.baz"], ["email", "john.doe@matrix.org"]]},
+ )
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index 95475bb651..9e549d8a91 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -34,6 +34,7 @@ class RetentionTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
config = self.default_config()
+ config["default_room_version"] = "1"
config["retention"] = {
"enabled": True,
"default_policy": {
@@ -203,6 +204,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
def make_homeserver(self, reactor, clock):
config = self.default_config()
+ config["default_room_version"] = "1"
config["retention"] = {
"enabled": True,
}
diff --git a/tests/rest/client/test_room_access_rules.py b/tests/rest/client/test_room_access_rules.py
new file mode 100644
index 0000000000..f10ae0adeb
--- /dev/null
+++ b/tests/rest/client/test_room_access_rules.py
@@ -0,0 +1,726 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+import random
+import string
+
+from mock import Mock
+
+from twisted.internet import defer
+
+from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+from synapse.third_party_rules.access_rules import (
+ ACCESS_RULE_DIRECT,
+ ACCESS_RULE_RESTRICTED,
+ ACCESS_RULE_UNRESTRICTED,
+ ACCESS_RULES_TYPE,
+)
+
+from tests import unittest
+
+
+class RoomAccessTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+
+ config["third_party_event_rules"] = {
+ "module": "synapse.third_party_rules.access_rules.RoomAccessRules",
+ "config": {
+ "domains_forbidden_when_restricted": ["forbidden_domain"],
+ "id_server": "testis",
+ },
+ }
+ config["trusted_third_party_id_servers"] = ["testis"]
+
+ def send_invite(destination, room_id, event_id, pdu):
+ return defer.succeed(pdu)
+
+ def get_json(uri, args={}, headers=None):
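+ # Mocked IS "/info" lookup: report the 3PID as belonging to the HS that
+ # matches its email domain, so e.g. "test@forbidden_domain" maps to the
+ # forbidden server configured above.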
+ address_domain = args["address"].split("@")[1]
+ return defer.succeed({"hs": address_domain})
+
+ def post_json_get_json(uri, post_json, args={}, headers=None):
+ token = "".join(random.choice(string.ascii_letters) for _ in range(10))
+ return defer.succeed(
+ {
+ "token": token,
+ "public_keys": [
+ {
+ "public_key": "serverpublickey",
+ "key_validity_url": "https://testis/pubkey/isvalid",
+ },
+ {
+ "public_key": "phemeralpublickey",
+ "key_validity_url": "https://testis/pubkey/ephemeral/isvalid",
+ },
+ ],
+ "display_name": "f...@b...",
+ }
+ )
+
+ mock_federation_client = Mock(spec=["send_invite"])
+ mock_federation_client.send_invite.side_effect = send_invite
+
+ mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ # Mocking the response for /info on the IS API.
+ mock_http_client.get_json.side_effect = get_json
+ # Mocking the response for /store-invite on the IS API.
+ mock_http_client.post_json_get_json.side_effect = post_json_get_json
+ self.hs = self.setup_test_homeserver(
+ config=config,
+ federation_client=mock_federation_client,
+ simple_http_client=mock_http_client,
+ )
+
+ # TODO: This class does not use a singleton to get its http client
+ # This should be fixed for easier testing
+ # https://github.com/matrix-org/synapse-dinsic/issues/26
+ self.hs.get_handlers().identity_handler.blacklisting_http_client = mock_http_client
+
+ return self.hs
+
+ def prepare(self, reactor, clock, homeserver):
+ self.user_id = self.register_user("kermit", "monkey")
+ self.tok = self.login("kermit", "monkey")
+
+ self.restricted_room = self.create_room()
+ self.unrestricted_room = self.create_room(rule=ACCESS_RULE_UNRESTRICTED)
+ self.direct_rooms = [
+ self.create_room(direct=True),
+ self.create_room(direct=True),
+ self.create_room(direct=True),
+ ]
+
+ self.invitee_id = self.register_user("invitee", "test")
+ self.invitee_tok = self.login("invitee", "test")
+
+ self.helper.invite(
+ room=self.direct_rooms[0],
+ src=self.user_id,
+ targ=self.invitee_id,
+ tok=self.tok,
+ )
+
+ def test_create_room_no_rule(self):
+ """Tests that creating a room with no rule will set the default value."""
+ room_id = self.create_room()
+ rule = self.current_rule_in_room(room_id)
+
+ self.assertEqual(rule, ACCESS_RULE_RESTRICTED)
+
+ def test_create_room_direct_no_rule(self):
+ """Tests that creating a direct room with no rule will set the default value."""
+ room_id = self.create_room(direct=True)
+ rule = self.current_rule_in_room(room_id)
+
+ self.assertEqual(rule, ACCESS_RULE_DIRECT)
+
+ def test_create_room_valid_rule(self):
+ """Tests that creating a room with a valid rule will set the right value."""
+ room_id = self.create_room(rule=ACCESS_RULE_UNRESTRICTED)
+ rule = self.current_rule_in_room(room_id)
+
+ self.assertEqual(rule, ACCESS_RULE_UNRESTRICTED)
+
+ def test_create_room_invalid_rule(self):
+ """Tests that creating a room with an invalid rule will set fail."""
+ self.create_room(rule=ACCESS_RULE_DIRECT, expected_code=400)
+
+ def test_create_room_direct_invalid_rule(self):
+ """Tests that creating a direct room with an invalid rule will fail.
+ """
+ self.create_room(direct=True, rule=ACCESS_RULE_RESTRICTED, expected_code=400)
+
+ def test_public_room(self):
+ """Tests that it's not possible to have a room with the public join rule and an
+ access rule that's not restricted.
+ """
+ # Creating a room with the public_chat preset should succeed and set the access
+ # rule to restricted.
+ preset_room_id = self.create_room(preset=RoomCreationPreset.PUBLIC_CHAT)
+ self.assertEqual(
+ self.current_rule_in_room(preset_room_id), ACCESS_RULE_RESTRICTED
+ )
+
+ # Creating a room with the public join rule in its initial state should succeed
+ # and set the access rule to restricted.
+ init_state_room_id = self.create_room(
+ initial_state=[
+ {
+ "type": "m.room.join_rules",
+ "content": {"join_rule": JoinRules.PUBLIC},
+ }
+ ]
+ )
+ self.assertEqual(
+ self.current_rule_in_room(init_state_room_id), ACCESS_RULE_RESTRICTED
+ )
+
+ # Changing access rule to unrestricted should fail.
+ self.change_rule_in_room(
+ preset_room_id, ACCESS_RULE_UNRESTRICTED, expected_code=403
+ )
+ self.change_rule_in_room(
+ init_state_room_id, ACCESS_RULE_UNRESTRICTED, expected_code=403
+ )
+
+ # Changing access rule to direct should fail.
+ self.change_rule_in_room(preset_room_id, ACCESS_RULE_DIRECT, expected_code=403)
+ self.change_rule_in_room(
+ init_state_room_id, ACCESS_RULE_DIRECT, expected_code=403
+ )
+
+ # Changing join rule to public in an unrestricted room should fail.
+ self.change_join_rule_in_room(
+ self.unrestricted_room, JoinRules.PUBLIC, expected_code=403
+ )
+ # Changing join rule to public in a direct room should fail.
+ self.change_join_rule_in_room(
+ self.direct_rooms[0], JoinRules.PUBLIC, expected_code=403
+ )
+
+ # Creating a new room with the public_chat preset and an access rule that isn't
+ # restricted should fail.
+ self.create_room(
+ preset=RoomCreationPreset.PUBLIC_CHAT,
+ rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=400,
+ )
+ self.create_room(
+ preset=RoomCreationPreset.PUBLIC_CHAT,
+ rule=ACCESS_RULE_DIRECT,
+ expected_code=400,
+ )
+
+ # Creating a room with the public join rule in its initial state and an access
+ # rule that isn't restricted should fail.
+ self.create_room(
+ initial_state=[
+ {
+ "type": "m.room.join_rules",
+ "content": {"join_rule": JoinRules.PUBLIC},
+ }
+ ],
+ rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=400,
+ )
+ self.create_room(
+ initial_state=[
+ {
+ "type": "m.room.join_rules",
+ "content": {"join_rule": JoinRules.PUBLIC},
+ }
+ ],
+ rule=ACCESS_RULE_DIRECT,
+ expected_code=400,
+ )
+
+ def test_restricted(self):
+ """Tests that in restricted mode we're unable to invite users from blacklisted
+ servers but can invite other users.
+ """
+ # We can't invite a user from a forbidden HS.
+ self.helper.invite(
+ room=self.restricted_room,
+ src=self.user_id,
+ targ="@test:forbidden_domain",
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ # We can invite a user whose HS isn't forbidden.
+ self.helper.invite(
+ room=self.restricted_room,
+ src=self.user_id,
+ targ="@test:allowed_domain",
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # We can't send a 3PID invite to an address that is mapped to a forbidden HS.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.restricted_room,
+ expected_code=403,
+ )
+
+ # We can send a 3PID invite to an address that is mapped to an HS that's not
+ # forbidden.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.restricted_room,
+ expected_code=200,
+ )
+
+ def test_direct(self):
+ """Tests that, in direct mode, other users than the initial two can't be invited,
+ but the following scenario works:
+ * invited user joins the room
+ * invited user leaves the room
+ * room creator re-invites invited user
+ Also tests that a user from an HS that's in the list of forbidden domains (which
+ only applies in restricted mode) can be invited.
+ """
+ not_invited_user = "@not_invited:forbidden_domain"
+
+ # We can't invite a new user to the room.
+ self.helper.invite(
+ room=self.direct_rooms[0],
+ src=self.user_id,
+ targ=not_invited_user,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ # The invited user can join the room.
+ self.helper.join(
+ room=self.direct_rooms[0],
+ user=self.invitee_id,
+ tok=self.invitee_tok,
+ expect_code=200,
+ )
+
+ # The invited user can leave the room.
+ self.helper.leave(
+ room=self.direct_rooms[0],
+ user=self.invitee_id,
+ tok=self.invitee_tok,
+ expect_code=200,
+ )
+
+ # The invited user can be re-invited to the room.
+ self.helper.invite(
+ room=self.direct_rooms[0],
+ src=self.user_id,
+ targ=self.invitee_id,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # If we're alone in the room and have always been the only member, we can invite
+ # someone.
+ self.helper.invite(
+ room=self.direct_rooms[1],
+ src=self.user_id,
+ targ=not_invited_user,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # Disable the 3pid invite ratelimiter
+ burst = self.hs.config.rc_third_party_invite.burst_count
+ per_second = self.hs.config.rc_third_party_invite.per_second
+ self.hs.config.rc_third_party_invite.burst_count = 10
+ self.hs.config.rc_third_party_invite.per_second = 0.1
+
+ # We can't send a 3PID invite to a room that already has two members.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.direct_rooms[0],
+ expected_code=403,
+ )
+
+ # We can't send a 3PID invite to a room that already has a pending invite.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.direct_rooms[1],
+ expected_code=403,
+ )
+
+ # We can send a 3PID invite to a room in which we've always been the only member.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.direct_rooms[2],
+ expected_code=200,
+ )
+
+ # We can't send a second 3PID invite to a room that already has a pending 3PID invite.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.direct_rooms[2],
+ expected_code=403,
+ )
+
+ self.hs.config.rc_third_party_invite.burst_count = burst
+ self.hs.config.rc_third_party_invite.per_second = per_second
+
+ def test_unrestricted(self):
+ """Tests that, in unrestricted mode, we can invite whoever we want, but we can
+ only change the power level of users that wouldn't be forbidden in restricted
+ mode.
+ """
+ # We can invite
+ self.helper.invite(
+ room=self.unrestricted_room,
+ src=self.user_id,
+ targ="@test:forbidden_domain",
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.invite(
+ room=self.unrestricted_room,
+ src=self.user_id,
+ targ="@test:not_forbidden_domain",
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # We can send a 3PID invite to an address that is mapped to a forbidden HS.
+ self.send_threepid_invite(
+ address="test@forbidden_domain",
+ room_id=self.unrestricted_room,
+ expected_code=200,
+ )
+
+ # We can send a 3PID invite to an address that is mapped to an HS that's not
+ # forbidden.
+ self.send_threepid_invite(
+ address="test@allowed_domain",
+ room_id=self.unrestricted_room,
+ expected_code=200,
+ )
+
+ # We can send a power level event that doesn't redefine the default PL or set a
+ # non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.PowerLevels,
+ body={"users": {self.user_id: 100, "@test:not_forbidden_domain": 10}},
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ # We can't send a power level event that redefines the default PL and doesn't set
+ # a non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.PowerLevels,
+ body={
+ "users": {self.user_id: 100, "@test:not_forbidden_domain": 10},
+ "users_default": 10,
+ },
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ # We can't send a power level event that doesn't redefine the default PL but sets
+ # a non-default PL for a user that would be forbidden in restricted mode.
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.PowerLevels,
+ body={"users": {self.user_id: 100, "@test:forbidden_domain": 10}},
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_change_rules(self):
+ """Tests that we can only change the current rule from restricted to
+ unrestricted.
+ """
+ # We can change the rule from restricted to unrestricted.
+ self.change_rule_in_room(
+ room_id=self.restricted_room,
+ new_rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=200,
+ )
+
+ # We can't change the rule from restricted to direct.
+ self.change_rule_in_room(
+ room_id=self.restricted_room, new_rule=ACCESS_RULE_DIRECT, expected_code=403
+ )
+
+ # We can't change the rule from unrestricted to restricted.
+ self.change_rule_in_room(
+ room_id=self.unrestricted_room,
+ new_rule=ACCESS_RULE_RESTRICTED,
+ expected_code=403,
+ )
+
+ # We can't change the rule from unrestricted to direct.
+ self.change_rule_in_room(
+ room_id=self.unrestricted_room,
+ new_rule=ACCESS_RULE_DIRECT,
+ expected_code=403,
+ )
+
+ # We can't change the rule from direct to restricted.
+ self.change_rule_in_room(
+ room_id=self.direct_rooms[0],
+ new_rule=ACCESS_RULE_RESTRICTED,
+ expected_code=403,
+ )
+
+ # We can't change the rule from direct to unrestricted.
+ self.change_rule_in_room(
+ room_id=self.direct_rooms[0],
+ new_rule=ACCESS_RULE_UNRESTRICTED,
+ expected_code=403,
+ )
+
+ def test_change_room_avatar(self):
+ """Tests that changing the room avatar is always allowed unless the room is a
+ direct chat, in which case it's forbidden.
+ """
+
+ avatar_content = {
+ "info": {"h": 398, "mimetype": "image/jpeg", "size": 31037, "w": 394},
+ "url": "mxc://example.org/JWEIFJgwEIhweiWJE",
+ }
+
+ self.helper.send_state(
+ room_id=self.restricted_room,
+ event_type=EventTypes.RoomAvatar,
+ body=avatar_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.RoomAvatar,
+ body=avatar_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.direct_rooms[0],
+ event_type=EventTypes.RoomAvatar,
+ body=avatar_content,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_change_room_name(self):
+ """Tests that changing the room name is always allowed unless the room is a direct
+ chat, in which case it's forbidden.
+ """
+
+ name_content = {"name": "My super room"}
+
+ self.helper.send_state(
+ room_id=self.restricted_room,
+ event_type=EventTypes.Name,
+ body=name_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.Name,
+ body=name_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.direct_rooms[0],
+ event_type=EventTypes.Name,
+ body=name_content,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_change_room_topic(self):
+ """Tests that changing the room topic is always allowed unless the room is a
+ direct chat, in which case it's forbidden.
+ """
+
+ topic_content = {"topic": "Welcome to this room"}
+
+ self.helper.send_state(
+ room_id=self.restricted_room,
+ event_type=EventTypes.Topic,
+ body=topic_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.unrestricted_room,
+ event_type=EventTypes.Topic,
+ body=topic_content,
+ tok=self.tok,
+ expect_code=200,
+ )
+
+ self.helper.send_state(
+ room_id=self.direct_rooms[0],
+ event_type=EventTypes.Topic,
+ body=topic_content,
+ tok=self.tok,
+ expect_code=403,
+ )
+
+ def test_revoke_3pid_invite_direct(self):
+ """Tests that revoking a 3PID invite doesn't cause the room access rules module to
+ confuse the revokation as a new 3PID invite.
+ """
+ invite_token = "sometoken"
+
+ invite_body = {
+ "display_name": "ker...@exa...",
+ "public_keys": [
+ {
+ "key_validity_url": "https://validity_url",
+ "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA",
+ },
+ {
+ "key_validity_url": "https://validity_url",
+ "public_key": "4_9nzEeDwR5N9s51jPodBiLnqH43A2_g2InVT137t9I",
+ },
+ ],
+ "key_validity_url": "https://validity_url",
+ "public_key": "ta8IQ0u1sp44HVpxYi7dFOdS/bfwDjcy4xLFlfY5KOA",
+ }
+
+ self.send_state_with_state_key(
+ room_id=self.direct_rooms[1],
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body=invite_body,
+ tok=self.tok,
+ )
+
+ self.send_state_with_state_key(
+ room_id=self.direct_rooms[1],
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body={},
+ tok=self.tok,
+ )
+
+ invite_token = "someothertoken"
+
+ self.send_state_with_state_key(
+ room_id=self.direct_rooms[1],
+ event_type=EventTypes.ThirdPartyInvite,
+ state_key=invite_token,
+ body=invite_body,
+ tok=self.tok,
+ )
+
+ def create_room(
+ self,
+ direct=False,
+ rule=None,
+ preset=RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+ initial_state=None,
+ expected_code=200,
+ ):
+ content = {"is_direct": direct, "preset": preset}
+
+ if rule:
+ content["initial_state"] = [
+ {"type": ACCESS_RULES_TYPE, "state_key": "", "content": {"rule": rule}}
+ ]
+
+ if initial_state:
+ if "initial_state" not in content:
+ content["initial_state"] = []
+
+ content["initial_state"] += initial_state
+
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/createRoom",
+ json.dumps(content),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ if expected_code == 200:
+ return channel.json_body["room_id"]
+
+ def current_rule_in_room(self, room_id):
+ request, channel = self.make_request(
+ "GET",
+ "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, 200, channel.result)
+ return channel.json_body["rule"]
+
+ def change_rule_in_room(self, room_id, new_rule, expected_code=200):
+ data = {"rule": new_rule}
+ request, channel = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, ACCESS_RULES_TYPE),
+ json.dumps(data),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ def change_join_rule_in_room(self, room_id, new_join_rule, expected_code=200):
+ data = {"join_rule": new_join_rule}
+ request, channel = self.make_request(
+ "PUT",
+ "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, EventTypes.JoinRules),
+ json.dumps(data),
+ access_token=self.tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ def send_threepid_invite(self, address, room_id, expected_code=200):
+ params = {"id_server": "testis", "medium": "email", "address": address}
+
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/rooms/%s/invite" % room_id,
+ json.dumps(params),
+ access_token=self.tok,
+ )
+ self.render(request)
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ def send_state_with_state_key(
+ self, room_id, event_type, state_key, body, tok, expect_code=200
+ ):
+ path = "/_matrix/client/r0/rooms/%s/state/%s/%s" % (
+ room_id,
+ event_type,
+ state_key,
+ )
+
+ request, channel = self.make_request(
+ "PUT", path, json.dumps(body), access_token=tok
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, expect_code, channel.result)
+
+ return channel.json_body
diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py
new file mode 100644
index 0000000000..37f970c6b0
--- /dev/null
+++ b/tests/rest/client/v2_alpha/test_password_policy.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from synapse.api.constants import LoginType
+from synapse.api.errors import Codes
+from synapse.rest import admin
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account, password_policy, register
+
+from tests import unittest
+
+
+class PasswordPolicyTestCase(unittest.HomeserverTestCase):
+ """Tests the password policy feature and its compliance with MSC2000.
+
+ When validating a password, Synapse does the necessary checks in this order:
+
+ 1. Password is long enough
+ 2. Password contains digit(s)
+ 3. Password contains symbol(s)
+ 4. Password contains uppercase letter(s)
+ 5. Password contains lowercase letter(s)
+
+ Therefore, each test in this test case that checks whether a password triggers
+ the right error code provides a password good enough to pass the previous steps
+ but not the one it's testing (nor any step that comes after).
+ """
+
+ servlets = [
+ admin.register_servlets_for_client_rest_resource,
+ login.register_servlets,
+ register.register_servlets,
+ password_policy.register_servlets,
+ account.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ self.register_url = "/_matrix/client/r0/register"
+ self.policy = {
+ "enabled": True,
+ "minimum_length": 10,
+ "require_digit": True,
+ "require_symbol": True,
+ "require_lowercase": True,
+ "require_uppercase": True,
+ }
+
+ config = self.default_config()
+ config["password_config"] = {"policy": self.policy}
+
+ hs = self.setup_test_homeserver(config=config)
+ return hs
+
+ def test_get_policy(self):
+ """Tests if the /password_policy endpoint returns the configured policy."""
+
+ request, channel = self.make_request(
+ "GET", "/_matrix/client/r0/password_policy"
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, 200, channel.result)
+ self.assertEqual(
+ channel.json_body,
+ {
+ "m.minimum_length": 10,
+ "m.require_digit": True,
+ "m.require_symbol": True,
+ "m.require_lowercase": True,
+ "m.require_uppercase": True,
+ },
+ channel.result,
+ )
+
+ def test_password_too_short(self):
+ request_data = json.dumps({"username": "kermit", "password": "shorty"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_TOO_SHORT, channel.result
+ )
+
+ def test_password_no_digit(self):
+ request_data = json.dumps({"username": "kermit", "password": "longerpassword"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT, channel.result
+ )
+
+ def test_password_no_symbol(self):
+ request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_SYMBOL, channel.result
+ )
+
+ def test_password_no_uppercase(self):
+ request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_UPPERCASE, channel.result
+ )
+
+ def test_password_no_lowercase(self):
+ request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(
+ channel.json_body["errcode"], Codes.PASSWORD_NO_LOWERCASE, channel.result
+ )
+
+ def test_password_compliant(self):
+ request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"})
+ request, channel = self.make_request("POST", self.register_url, request_data)
+ self.render(request)
+
+ # Getting a 401 here means the password has passed validation and the server has
+ # responded with a list of registration flows.
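+ # (The 401 body is assumed to follow the standard user-interactive auth
+ # shape, roughly {"session": ..., "flows": [...], "params": {...}}.)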
+ self.assertEqual(channel.code, 401, channel.result)
+
+ def test_password_change(self):
+ """This doesn't test every possible use case, only that hitting /account/password
+ triggers the password validation code.
+ """
+ compliant_password = "C0mpl!antpassword"
+ not_compliant_password = "notcompliantpassword"
+
+ user_id = self.register_user("kermit", compliant_password)
+ tok = self.login("kermit", compliant_password)
+
+ request_data = json.dumps(
+ {
+ "new_password": not_compliant_password,
+ "auth": {
+ "password": compliant_password,
+ "type": LoginType.PASSWORD,
+ "user": user_id,
+ },
+ }
+ )
+ request, channel = self.make_request(
+ "POST",
+ "/_matrix/client/r0/account/password",
+ request_data,
+ access_token=tok,
+ )
+ self.render(request)
+
+ self.assertEqual(channel.code, 400, channel.result)
+ self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT)
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index d0c997e385..d99b100d0f 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -19,8 +19,12 @@ import datetime
import json
import os
+from mock import Mock
+
import pkg_resources
+from twisted.internet import defer
+
import synapse.rest.admin
from synapse.api.constants import LoginType
from synapse.api.errors import Codes
@@ -261,6 +265,47 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
)
+class RegisterHideProfileTestCase(unittest.HomeserverTestCase):
+
+ servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource]
+
+ def make_homeserver(self, reactor, clock):
+
+ self.url = b"/_matrix/client/r0/register"
+
+ config = self.default_config()
+ config["enable_registration"] = True
+ config["show_users_in_user_directory"] = False
+ config["replicate_user_profiles_to"] = ["fakeserver"]
+
+ mock_http_client = Mock(spec=["get_json", "post_json_get_json"])
+ mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}"))
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ return self.hs
+
+ def test_profile_hidden(self):
+ user_id = self.register_user("kermit", "monkey")
+
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+
+ # We expect post_json_get_json to have been called twice: once with the original
+ # profile and once with the None profile resulting from the request to hide it
+ # from the user directory.
+ self.assertEqual(post_json.call_count, 2, post_json.call_args_list)
+
+ # Get the args (and not kwargs) passed to post_json.
+ args = post_json.call_args[0]
+ # Make sure the last call was attempting to replicate profiles.
+ self.assertEqual(args[0].split("/")[-1], "replicate_profiles", args[0])
+ # Make sure the last profile update was overriding the user's profile to None.
+ self.assertEqual(args[1]["batch"][user_id], None, args[1])
+
+
class AccountValidityTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -269,6 +314,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
login.register_servlets,
sync.register_servlets,
account_validity.register_servlets,
+ account.register_servlets,
]
def make_homeserver(self, reactor, clock):
@@ -361,6 +407,138 @@ class AccountValidityTestCase(unittest.HomeserverTestCase):
)
+class AccountValidityUserDirectoryTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.client.v1.profile.register_servlets,
+ synapse.rest.client.v1.room.register_servlets,
+ synapse.rest.client.v2_alpha.user_directory.register_servlets,
+ login.register_servlets,
+ register.register_servlets,
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ account_validity.register_servlets,
+ ]
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+
+ # Set accounts to expire after a week
+ config["enable_registration"] = True
+ config["account_validity"] = {
+ "enabled": True,
+ "period": 604800000, # Time in ms for 1 week
+ }
+ config["replicate_user_profiles_to"] = "test.is"
+
+ # Mock homeserver requests to an identity server
+ mock_http_client = Mock(spec=["post_json_get_json"])
+ mock_http_client.post_json_get_json.return_value = defer.succeed((200, "{}"))
+
+ self.hs = self.setup_test_homeserver(
+ config=config, simple_http_client=mock_http_client
+ )
+
+ return self.hs
+
+ def test_expired_user_in_directory(self):
+ """Test that an expired user is hidden in the user directory"""
+ # Create an admin user to search the user directory
+ admin_id = self.register_user("admin", "adminpassword", admin=True)
+ admin_tok = self.login("admin", "adminpassword")
+
+ # Ensure the admin never expires
+ url = "/_matrix/client/unstable/admin/account_validity/validity"
+ params = {
+ "user_id": admin_id,
+ "expiration_ts": 999999999999,
+ "enable_renewal_emails": False,
+ }
+ request_data = json.dumps(params)
+ request, channel = self.make_request(
+ b"POST", url, request_data, access_token=admin_tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ # Create a user to expire
+ username = "kermit"
+ user_id = self.register_user(username, "monkey")
+ self.login(username, "monkey")
+
+ self.pump(1000)
+ self.reactor.advance(1000)
+ self.pump()
+
+ # Expire the user
+ url = "/_matrix/client/unstable/admin/account_validity/validity"
+ params = {
+ "user_id": user_id,
+ "expiration_ts": 0,
+ "enable_renewal_emails": False,
+ }
+ request_data = json.dumps(params)
+ request, channel = self.make_request(
+ b"POST", url, request_data, access_token=admin_tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ # Wait for the background job which hides expired users in the directory to run
+ self.pump(60 * 60 * 1000)
+
+ # Grab the mocked HTTP client from the homeserver
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+
+ # Check that the homeserver has replicated the user's profile to the identity server
+ self.assertNotEquals(post_json.call_args, None, post_json.call_args)
+ payload = post_json.call_args[0][1]
+ batch = payload.get("batch")
+ self.assertNotEquals(batch, None, batch)
+ self.assertEquals(len(batch), 1, batch)
+ replicated_user_id = list(batch.keys())[0]
+ self.assertEquals(replicated_user_id, user_id, replicated_user_id)
+
+ # There was replicated information about our user. Check that it's None,
+ # signifying that the user should be removed from the user directory
+ # because they have expired
+ replicated_content = batch[user_id]
+ self.assertIsNone(replicated_content)
+
+ # Now renew the user, and check they get replicated again to the identity server
+ url = "/_matrix/client/unstable/admin/account_validity/validity"
+ params = {
+ "user_id": user_id,
+ "expiration_ts": 99999999999,
+ "enable_renewal_emails": False,
+ }
+ request_data = json.dumps(params)
+ request, channel = self.make_request(
+ b"POST", url, request_data, access_token=admin_tok
+ )
+ self.render(request)
+ self.assertEquals(channel.result["code"], b"200", channel.result)
+
+ self.pump(10)
+ self.reactor.advance(10)
+ self.pump()
+
+ # Check that the homeserver has replicated the user's profile to the identity server
+ post_json = self.hs.get_simple_http_client().post_json_get_json
+ self.assertNotEquals(post_json.call_args, None, post_json.call_args)
+ payload = post_json.call_args[0][1]
+ batch = payload.get("batch")
+ self.assertNotEquals(batch, None, batch)
+ self.assertEquals(len(batch), 1, batch)
+ replicated_user_id = list(batch.keys())[0]
+ self.assertEquals(replicated_user_id, user_id, replicated_user_id)
+
+ # There was replicated information about our user. Check that it's not
+ # None, signifying that the user is back in the user directory
+ replicated_content = batch[user_id]
+ self.assertIsNotNone(replicated_content)
+
+
class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
servlets = [
@@ -511,7 +689,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
"POST", "account/deactivate", request_data, access_token=tok
)
self.render(request)
- self.assertEqual(request.code, 200)
+ self.assertEqual(request.code, 200, channel.result)
self.reactor.advance(datetime.timedelta(days=8).total_seconds())
diff --git a/tests/rulecheck/__init__.py b/tests/rulecheck/__init__.py
new file mode 100644
index 0000000000..a354d38ca8
--- /dev/null
+++ b/tests/rulecheck/__init__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/tests/rulecheck/test_domainrulecheck.py b/tests/rulecheck/test_domainrulecheck.py
new file mode 100644
index 0000000000..1accc70dc9
--- /dev/null
+++ b/tests/rulecheck/test_domainrulecheck.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+
+import synapse.rest.admin
+from synapse.config._base import ConfigError
+from synapse.rest.client.v1 import login, room
+from synapse.rulecheck.domain_rule_checker import DomainRuleChecker
+
+from tests import unittest
+from tests.server import make_request, render
+
+
+class DomainRuleCheckerTestCase(unittest.TestCase):
+ def test_allowed(self):
+ config = {
+ "default": False,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ "domains_prevented_from_being_invited_to_published_rooms": ["target_two"],
+ }
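+ # With this config, a user on source_one may invite users on target_one or
+ # target_two, a user on source_two may only invite users on target_two, and
+ # anything else falls back to "default" (deny here).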
+ check = DomainRuleChecker(config)
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_one", None, "room", False
+ )
+ )
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_two", None, "room", False
+ )
+ )
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_two", "test:target_two", None, "room", False
+ )
+ )
+
+ # User can invite internal user to a published room
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test1:target_one", None, "room", False, True
+ )
+ )
+
+ # User can invite external user to a non-published room
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_two", None, "room", False, False
+ )
+ )
+
+ def test_disallowed(self):
+ config = {
+ "default": True,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ "source_four": [],
+ },
+ }
+ check = DomainRuleChecker(config)
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_one", "test:target_three", None, "room", False
+ )
+ )
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_two", "test:target_three", None, "room", False
+ )
+ )
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_two", "test:target_one", None, "room", False
+ )
+ )
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_four", "test:target_one", None, "room", False
+ )
+ )
+
+ # User can still invite an external user to a published room here, since
+ # this config doesn't set domains_prevented_from_being_invited_to_published_rooms
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_one", "test:target_two", None, "room", False, True
+ )
+ )
+
+ def test_default_allow(self):
+ config = {
+ "default": True,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ }
+ check = DomainRuleChecker(config)
+ self.assertTrue(
+ check.user_may_invite(
+ "test:source_three", "test:target_one", None, "room", False
+ )
+ )
+
+ def test_default_deny(self):
+ config = {
+ "default": False,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ }
+ check = DomainRuleChecker(config)
+ self.assertFalse(
+ check.user_may_invite(
+ "test:source_three", "test:target_one", None, "room", False
+ )
+ )
+
+ def test_config_parse(self):
+ config = {
+ "default": False,
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ },
+ }
+ self.assertEquals(config, DomainRuleChecker.parse_config(config))
+
+ def test_config_parse_failure(self):
+ config = {
+ "domain_mapping": {
+ "source_one": ["target_one", "target_two"],
+ "source_two": ["target_two"],
+ }
+ }
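+ # "default" is missing, so parsing should fail.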
+ self.assertRaises(ConfigError, DomainRuleChecker.parse_config, config)
+
+
+class DomainRuleCheckerRoomTestCase(unittest.HomeserverTestCase):
+ servlets = [
+ synapse.rest.admin.register_servlets_for_client_rest_resource,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
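+ # Don't stub out auth: these tests exercise real registration and login.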
+ hijack_auth = False
+
+ def make_homeserver(self, reactor, clock):
+ config = self.default_config()
+ config["trusted_third_party_id_servers"] = ["localhost"]
+
+ config["spam_checker"] = {
+ "module": "synapse.rulecheck.domain_rule_checker.DomainRuleChecker",
+ "config": {
+ "default": True,
+ "domain_mapping": {},
+ "can_only_join_rooms_with_invite": True,
+ "can_only_create_one_to_one_rooms": True,
+ "can_only_invite_during_room_creation": True,
+ "can_invite_by_third_party_id": False,
+ },
+ }
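+ # These options are exercised below: room creation must include exactly
+ # one invite, rooms can only be joined with an invite, and third-party-ID
+ # invites are rejected outright.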
+
+ hs = self.setup_test_homeserver(config=config)
+ return hs
+
+ def prepare(self, reactor, clock, hs):
+ self.admin_user_id = self.register_user("admin_user", "pass", admin=True)
+ self.admin_access_token = self.login("admin_user", "pass")
+
+ self.normal_user_id = self.register_user("normal_user", "pass", admin=False)
+ self.normal_access_token = self.login("normal_user", "pass")
+
+ self.other_user_id = self.register_user("other_user", "pass", admin=False)
+
+ def test_admin_can_create_room(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ def test_normal_user_cannot_create_empty_room(self):
+ channel = self._create_room(self.normal_access_token)
+ assert channel.result["code"] == b"403", channel.result
+
+ def test_normal_user_cannot_create_room_with_multiple_invites(self):
+ channel = self._create_room(
+ self.normal_access_token,
+ content={"invite": [self.other_user_id, self.admin_user_id]},
+ )
+ assert channel.result["code"] == b"403", channel.result
+
+ # Test that it correctly counts both normal and third party invites
+ channel = self._create_room(
+ self.normal_access_token,
+ content={
+ "invite": [self.other_user_id],
+ "invite_3pid": [{"medium": "email", "address": "foo@example.com"}],
+ },
+ )
+ assert channel.result["code"] == b"403", channel.result
+
+ # Test that it correctly rejects third party invites
+ channel = self._create_room(
+ self.normal_access_token,
+ content={
+ "invite": [],
+ "invite_3pid": [{"medium": "email", "address": "foo@example.com"}],
+ },
+ )
+ assert channel.result["code"] == b"403", channel.result
+
+ def test_normal_user_can_create_room_with_single_invite(self):
+ channel = self._create_room(
+ self.normal_access_token, content={"invite": [self.other_user_id]}
+ )
+ assert channel.result["code"] == b"200", channel.result
+
+ def test_cannot_join_public_room(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=403
+ )
+
+ def test_can_join_invited_room(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.invite(
+ room_id,
+ src=self.admin_user_id,
+ targ=self.normal_user_id,
+ tok=self.admin_access_token,
+ )
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200
+ )
+
+ def test_cannot_invite(self):
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.invite(
+ room_id,
+ src=self.admin_user_id,
+ targ=self.normal_user_id,
+ tok=self.admin_access_token,
+ )
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200
+ )
+
+ self.helper.invite(
+ room_id,
+ src=self.normal_user_id,
+ targ=self.other_user_id,
+ tok=self.normal_access_token,
+ expect_code=403,
+ )
+
+ def test_cannot_3pid_invite(self):
+ """Test that unbound 3pid invites get rejected.
+ """
+ channel = self._create_room(self.admin_access_token)
+ assert channel.result["code"] == b"200", channel.result
+
+ room_id = channel.json_body["room_id"]
+
+ self.helper.invite(
+ room_id,
+ src=self.admin_user_id,
+ targ=self.normal_user_id,
+ tok=self.admin_access_token,
+ )
+
+ self.helper.join(
+ room_id, self.normal_user_id, tok=self.normal_access_token, expect_code=200
+ )
+
+ self.helper.invite(
+ room_id,
+ src=self.normal_user_id,
+ targ=self.other_user_id,
+ tok=self.normal_access_token,
+ expect_code=403,
+ )
+
+ request, channel = self.make_request(
+ "POST",
+ "rooms/%s/invite" % (room_id),
+ {"address": "foo@bar.com", "medium": "email", "id_server": "localhost"},
+ access_token=self.normal_access_token,
+ )
+ self.render(request)
+ self.assertEqual(channel.code, 403, channel.result["body"])
+
+ def _create_room(self, token, content=None):
+ path = "/_matrix/client/r0/createRoom?access_token=%s" % (token,)
+
+ request, channel = make_request(
+ self.hs.get_reactor(),
+ "POST",
+ path,
+ content=json.dumps(content or {}).encode("utf8"),
+ )
+ render(request, self.resource, self.hs.get_reactor())
+
+ return channel
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 0f341d3ac3..5bafad9f19 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -22,7 +22,7 @@ import attr
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
from synapse.event_auth import auth_types_for_event
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from synapse.state.v2 import lexicographical_topological_sort, resolve_events_with_store
from synapse.types import EventID
@@ -89,7 +89,7 @@ class FakeEvent(object):
if self.state_key is not None:
event_dict["state_key"] = self.state_key
- return FrozenEvent(event_dict)
+ return make_event_from_dict(event_dict)
# All graphs start with this set of events
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index 9b6f7211ae..7458a37e54 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -33,9 +33,7 @@ class ProfileStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_displayname(self):
- yield self.store.create_profile(self.u_frank.localpart)
-
- yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank")
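+ # The trailing argument is assumed to be the profile-replication batch
+ # number introduced elsewhere in this changeset.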
+ yield self.store.set_profile_displayname(self.u_frank.localpart, "Frank", 1)
self.assertEquals(
"Frank", (yield self.store.get_profile_displayname(self.u_frank.localpart))
@@ -43,10 +41,8 @@ class ProfileStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_avatar_url(self):
- yield self.store.create_profile(self.u_frank.localpart)
-
yield self.store.set_profile_avatar_url(
- self.u_frank.localpart, "http://my.site/here"
+ self.u_frank.localpart, "http://my.site/here", 1
)
self.assertEquals(
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index feb1c07cb2..b9ee6ec1ec 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -238,8 +238,11 @@ class RedactionTestCase(unittest.HomeserverTestCase):
@defer.inlineCallbacks
def build(self, prev_event_ids):
built_event = yield self._base_builder.build(prev_event_ids)
- built_event.event_id = self._event_id
+
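+ # event_id is no longer a plain attribute on the event class, so set the
+ # underlying fields directly and sanity-check the result.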
+ built_event._event_id = self._event_id
built_event._event_dict["event_id"] = self._event_id
+ assert built_event.event_id == self._event_id
+
return built_event
@property
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index ca20b085a2..bfa5d6f510 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -18,7 +18,7 @@ import unittest
from synapse import event_auth
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersions
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
class EventAuthTestCase(unittest.TestCase):
@@ -94,7 +94,7 @@ TEST_ROOM_ID = "!test:room"
def _create_event(user_id):
- return FrozenEvent(
+ return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
@@ -106,7 +106,7 @@ def _create_event(user_id):
def _join_event(user_id):
- return FrozenEvent(
+ return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
@@ -119,7 +119,7 @@ def _join_event(user_id):
def _power_levels_event(sender, content):
- return FrozenEvent(
+ return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
@@ -132,7 +132,7 @@ def _power_levels_event(sender, content):
def _random_state_event(sender):
- return FrozenEvent(
+ return make_event_from_dict(
{
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 68684460c6..9b5cf562f3 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -2,7 +2,7 @@ from mock import Mock
from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from synapse.logging.context import LoggingContext
from synapse.types import Requester, UserID
from synapse.util import Clock
@@ -43,7 +43,7 @@ class MessageAcceptTests(unittest.TestCase):
)
)[0]
- join_event = FrozenEvent(
+ join_event = make_event_from_dict(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
@@ -105,7 +105,7 @@ class MessageAcceptTests(unittest.TestCase):
)[0]
# Now lie about an event
- lying_event = FrozenEvent(
+ lying_event = make_event_from_dict(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
diff --git a/tests/test_state.py b/tests/test_state.py
index 1e4449fa1c..d1578fe581 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -20,7 +20,7 @@ from twisted.internet import defer
from synapse.api.auth import Auth
from synapse.api.constants import EventTypes, Membership
from synapse.api.room_versions import RoomVersions
-from synapse.events import FrozenEvent
+from synapse.events import make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.state import StateHandler, StateResolutionHandler
@@ -66,7 +66,7 @@ def create_event(
d.update(kwargs)
- event = FrozenEvent(d)
+ event = make_event_from_dict(d)
return event
diff --git a/tests/test_types.py b/tests/test_types.py
index 8d97c751ea..7390a1ce62 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -12,9 +12,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from six import string_types
from synapse.api.errors import SynapseError
-from synapse.types import GroupID, RoomAlias, UserID, map_username_to_mxid_localpart
+from synapse.types import (
+ GroupID,
+ RoomAlias,
+ UserID,
+ map_username_to_mxid_localpart,
+ strip_invalid_mxid_characters,
+)
from tests import unittest
@@ -103,3 +110,16 @@ class MapUsernameTestCase(unittest.TestCase):
self.assertEqual(
map_username_to_mxid_localpart("têst".encode("utf-8")), "t=c3=aast"
)
+
+
+class StripInvalidMxidCharactersTestCase(unittest.TestCase):
+ def test_return_type(self):
+ unstripped = strip_invalid_mxid_characters("test")
+ stripped = strip_invalid_mxid_characters("test@")
+
+ self.assertTrue(isinstance(unstripped, string_types), type(unstripped))
+ self.assertTrue(isinstance(stripped, string_types), type(stripped))
+
+ def test_strip(self):
+ stripped = strip_invalid_mxid_characters("test@")
+ self.assertEqual(stripped, "test", stripped)
diff --git a/tox.ini b/tox.ini
index 88ef12bebd..0fa78a0ff7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -138,7 +138,7 @@ commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests scripts-dev
skip_install = True
deps = towncrier>=18.6.0rc1
commands =
- python -m towncrier.check --compare-with=origin/develop
+ python -m towncrier.check --compare-with=origin/dinsic
basepython = python3.6
[testenv:check-sampleconfig]
@@ -179,7 +179,9 @@ extras = all
commands = mypy \
synapse/api \
synapse/config/ \
+ synapse/federation/sender \
synapse/federation/transport \
+ synapse/handlers/sync.py \
synapse/handlers/ui_auth \
synapse/logging/ \
synapse/module_api \
|