diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh
new file mode 100755
index 0000000000..75f5811d10
--- /dev/null
+++ b/.ci/scripts/test_export_data_command.sh
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+
+# Test for the export-data admin command against sqlite and postgres
+
+set -xe
+cd "$(dirname "$0")/../.."
+
+echo "--- Install dependencies"
+
+# Install dependencies for this test.
+pip install psycopg2
+
+# Install Synapse itself. This won't update any libraries.
+pip install -e .
+
+echo "--- Generate the signing key"
+
+# Generate the server's signing key.
+python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml
+
+echo "--- Prepare test database"
+
+# Make sure the SQLite3 database is using the latest schema and has no pending background update.
+scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
+
+# Run the export-data command on the sqlite test database
+python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
+--output-directory /tmp/export_data
+
+# Test that the output directory exists and contains the rooms directory
+dir="/tmp/export_data/rooms"
+if [ -d "$dir" ]; then
+ echo "Command successful, this test passes"
+else
+ echo "No output directories found, the command fails against a sqlite database."
+ exit 1
+fi
+
+# Create the PostgreSQL database.
+.ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
+
+# Port the SQLite database to postgres so we can check that the command works against postgres
+echo "+++ Port SQLite3 database to postgres"
+scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
+
+# Run the export-data command on the postgres database
+python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \
+--output-directory /tmp/export_data2
+
+# Test that the output directory exists and contains the rooms directory
+dir2="/tmp/export_data2/rooms"
+if [ -d "$dir2" ]; then
+ echo "Command successful, this test passes"
+else
+ echo "No output directories found, the command fails against a postgres database."
+ exit 1
+fi
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 9e302bf446..8d7e8cafd9 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -253,6 +253,35 @@ jobs:
/logs/results.tap
/logs/**/*.log*
+ export-data:
+ if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
+ needs: [linting-done, portdb]
+ runs-on: ubuntu-latest
+ env:
+ TOP: ${{ github.workspace }}
+
+ services:
+ postgres:
+ image: postgres
+ ports:
+ - 5432:5432
+ env:
+ POSTGRES_PASSWORD: "postgres"
+ POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8"
+ options: >-
+ --health-cmd pg_isready
+ --health-interval 10s
+ --health-timeout 5s
+ --health-retries 5
+
+ steps:
+ - uses: actions/checkout@v2
+ - run: sudo apt-get -qq install xmlsec1
+ - uses: actions/setup-python@v2
+ with:
+ python-version: "3.9"
+ - run: .ci/scripts/test_export_data_command.sh
+
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
diff --git a/changelog.d/10984.misc b/changelog.d/10984.misc
new file mode 100644
index 0000000000..86c4081cc4
--- /dev/null
+++ b/changelog.d/10984.misc
@@ -0,0 +1 @@
+Fix spurious warnings about losing the logging context on the `ReplicationCommandHandler` when losing the replication connection.
diff --git a/changelog.d/11001.bugfix b/changelog.d/11001.bugfix
new file mode 100644
index 0000000000..f51ffb3481
--- /dev/null
+++ b/changelog.d/11001.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state.
diff --git a/changelog.d/11008.misc b/changelog.d/11008.misc
new file mode 100644
index 0000000000..a67d95d66f
--- /dev/null
+++ b/changelog.d/11008.misc
@@ -0,0 +1 @@
+Include rejected status when we log events.
diff --git a/changelog.d/11009.bugfix b/changelog.d/11009.bugfix
new file mode 100644
index 0000000000..13b8e5983b
--- /dev/null
+++ b/changelog.d/11009.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which meant that events received over federation were sometimes incorrectly accepted into the room state.
diff --git a/changelog.d/11014.misc b/changelog.d/11014.misc
new file mode 100644
index 0000000000..4b99ea354f
--- /dev/null
+++ b/changelog.d/11014.misc
@@ -0,0 +1 @@
+Add some extra logging to the event persistence code.
diff --git a/changelog.d/11051.bugfix b/changelog.d/11051.bugfix
new file mode 100644
index 0000000000..63126843d2
--- /dev/null
+++ b/changelog.d/11051.bugfix
@@ -0,0 +1 @@
+Fix a bug where setting a user's external_id via the admin API returns 500 and deletes the user's existing external mappings if that external ID is already mapped.
\ No newline at end of file
diff --git a/changelog.d/11070.misc b/changelog.d/11070.misc
new file mode 100644
index 0000000000..52b23f9671
--- /dev/null
+++ b/changelog.d/11070.misc
@@ -0,0 +1 @@
+Create a separate module for the retention configuration.
diff --git a/changelog.d/11075.bugfix b/changelog.d/11075.bugfix
new file mode 100644
index 0000000000..9b24971c5a
--- /dev/null
+++ b/changelog.d/11075.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug where users excluded from the user directory were added into the directory if they belonged to a room which became public or private.
\ No newline at end of file
diff --git a/changelog.d/11078.bugfix b/changelog.d/11078.bugfix
new file mode 100644
index 0000000000..cc813babe4
--- /dev/null
+++ b/changelog.d/11078.bugfix
@@ -0,0 +1 @@
+Fix the broken `export-data` admin command and add a CI test script that checks the command.
\ No newline at end of file
diff --git a/changelog.d/11092.doc b/changelog.d/11092.doc
new file mode 100644
index 0000000000..916c2b3476
--- /dev/null
+++ b/changelog.d/11092.doc
@@ -0,0 +1 @@
+Clarify that the sample log config can be copied from the documentation without issue.
diff --git a/changelog.d/11093.doc b/changelog.d/11093.doc
new file mode 100644
index 0000000000..70fca0bdce
--- /dev/null
+++ b/changelog.d/11093.doc
@@ -0,0 +1 @@
+Update the admin API documentation with an updated list of the characters allowed in registration tokens.
diff --git a/changelog.d/11096.doc b/changelog.d/11096.doc
new file mode 100644
index 0000000000..d8e7424289
--- /dev/null
+++ b/changelog.d/11096.doc
@@ -0,0 +1 @@
+Document Synapse's behaviour when dealing with multiple modules registering the same callbacks and/or handlers for the same HTTP endpoints.
diff --git a/changelog.d/11101.bugfix b/changelog.d/11101.bugfix
new file mode 100644
index 0000000000..0de507848f
--- /dev/null
+++ b/changelog.d/11101.bugfix
@@ -0,0 +1 @@
+Show an error when a timestamp in seconds is provided to the `/purge_media_cache` Admin API.
\ No newline at end of file
diff --git a/changelog.d/11103.bugfix b/changelog.d/11103.bugfix
new file mode 100644
index 0000000000..3498f04a45
--- /dev/null
+++ b/changelog.d/11103.bugfix
@@ -0,0 +1 @@
+Fix local users who left all their rooms being removed from the user directory, even if the `search_all_users` config option was enabled.
\ No newline at end of file
diff --git a/changelog.d/11109.misc b/changelog.d/11109.misc
new file mode 100644
index 0000000000..d83936ccc4
--- /dev/null
+++ b/changelog.d/11109.misc
@@ -0,0 +1 @@
+Add missing type hints to `synapse.api` module.
diff --git a/changelog.d/11115.misc b/changelog.d/11115.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/11115.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/11116.misc b/changelog.d/11116.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/11116.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/11118.doc b/changelog.d/11118.doc
new file mode 100644
index 0000000000..3c2187f3b1
--- /dev/null
+++ b/changelog.d/11118.doc
@@ -0,0 +1 @@
+Fix instances of `[example]{.title-ref}` left in the upgrade documentation by the prior RST to Markdown conversion.
diff --git a/changelog.d/11120.bugfix b/changelog.d/11120.bugfix
new file mode 100644
index 0000000000..6b39e3e89d
--- /dev/null
+++ b/changelog.d/11120.bugfix
@@ -0,0 +1 @@
+Fix identity server connections ignoring `ip_range_whitelist`.
diff --git a/changelog.d/11121.misc b/changelog.d/11121.misc
new file mode 100644
index 0000000000..916beeaacb
--- /dev/null
+++ b/changelog.d/11121.misc
@@ -0,0 +1 @@
+Add type hints for event fetching.
diff --git a/changelog.d/11122.misc b/changelog.d/11122.misc
new file mode 100644
index 0000000000..9a765435db
--- /dev/null
+++ b/changelog.d/11122.misc
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.
diff --git a/changelog.d/11132.doc b/changelog.d/11132.doc
new file mode 100644
index 0000000000..4f38be5b27
--- /dev/null
+++ b/changelog.d/11132.doc
@@ -0,0 +1 @@
+Document the version of Synapse each module callback was introduced in.
diff --git a/changelog.d/11139.misc b/changelog.d/11139.misc
new file mode 100644
index 0000000000..86a9189200
--- /dev/null
+++ b/changelog.d/11139.misc
@@ -0,0 +1 @@
+Update the `sign_json` script to support inline configuration of the signing key.
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index ea05bd6e44..60b8bc7379 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -257,9 +257,9 @@ POST /_synapse/admin/v1/media/<server_name>/delete?before_ts=<before_ts>
URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`).
-* `before_ts`: string representing a positive integer - Unix timestamp in ms.
+* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
Files that were last used before this timestamp will be deleted. It is the timestamp of
-last access and not the timestamp creation.
+last access, not the timestamp when the file was created.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
@@ -302,7 +302,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>
URL Parameters
-* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in ms.
+* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
All cached media that was last accessed before this timestamp will be removed.
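+
+For example, to remove all cached media that was last accessed before midnight UTC on
+1 January 2021 (note that the timestamp is in milliseconds, not seconds; the request
+below is illustrative):
+
+```
+POST /_synapse/admin/v1/purge_media_cache?before_ts=1609459200000
+```
+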
Response:
diff --git a/docs/modules/account_validity_callbacks.md b/docs/modules/account_validity_callbacks.md
index 80684b7828..3cd0e72198 100644
--- a/docs/modules/account_validity_callbacks.md
+++ b/docs/modules/account_validity_callbacks.md
@@ -9,6 +9,8 @@ The available account validity callbacks are:
### `is_user_expired`
+_First introduced in Synapse v1.39.0_
+
```python
async def is_user_expired(user: str) -> Optional[bool]
```
@@ -22,8 +24,15 @@ If the module returns `True`, the current request will be denied with the error
`ORG_MATRIX_EXPIRED_ACCOUNT` and the HTTP status code 403. Note that this doesn't
invalidate the user's access token.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `None`, Synapse falls through to the next one. The value of the first
+callback that does not return `None` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `on_user_registration`
+_First introduced in Synapse v1.39.0_
+
```python
async def on_user_registration(user: str) -> None
```
@@ -31,3 +40,5 @@ async def on_user_registration(user: str) -> None
Called after successfully registering a user, in case the module needs to perform extra
operations to keep track of them. (e.g. add them to a database table). The user is
represented by their Matrix user ID.
+
+If multiple modules implement this callback, Synapse runs them all in order.
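+
+A minimal module sketch implementing both callbacks might look like the following. This
+is illustrative only and assumes the callbacks are registered through the module API's
+`register_account_validity_callbacks` method:
+
+```python
+from typing import Optional
+
+
+class ExampleAccountValidityModule:
+    def __init__(self, config: dict, api):
+        # Register both account validity callbacks with Synapse.
+        api.register_account_validity_callbacks(
+            is_user_expired=self.is_user_expired,
+            on_user_registration=self.on_user_registration,
+        )
+
+    async def is_user_expired(self, user: str) -> Optional[bool]:
+        # Returning None lets the next module (or Synapse itself) decide.
+        return None
+
+    async def on_user_registration(self, user: str) -> None:
+        # e.g. record the new user in a bookkeeping table.
+        pass
+```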
diff --git a/docs/modules/index.md b/docs/modules/index.md
index 3fda8cb7f0..0a868b309f 100644
--- a/docs/modules/index.md
+++ b/docs/modules/index.md
@@ -2,6 +2,11 @@
Synapse supports extending its functionality by configuring external modules.
+**Note**: When using third-party modules, you effectively allow someone else to run
+custom code on your Synapse homeserver. Server admins are encouraged to verify the
+provenance of the modules they use on their homeserver and make sure the modules aren't
+running malicious code on their instance.
+
## Using modules
To use a module on Synapse, add it to the `modules` section of the configuration file:
@@ -18,17 +23,31 @@ modules:
Each module is defined by a path to a Python class as well as a configuration. This
information for a given module should be available in the module's own documentation.
-**Note**: When using third-party modules, you effectively allow someone else to run
-custom code on your Synapse homeserver. Server admins are encouraged to verify the
-provenance of the modules they use on their homeserver and make sure the modules aren't
-running malicious code on their instance.
+## Using multiple modules
+
+The order in which modules are listed in this section is important. When processing an
+action that can be handled by several modules, Synapse will always prioritise the module
+that appears first (i.e. is the highest in the list). This means:
+
+* If several modules register the same callback, the callback registered by the module
+ that appears first is used.
+* If several modules try to register a handler for the same HTTP path, only the handler
+ registered by the module that appears first is used. Handlers registered by the other
+ module(s) are ignored and Synapse will log a warning message about them.
+
+Note that Synapse doesn't allow multiple modules implementing authentication checkers via
+the password auth provider feature for the same login type with different fields. If this
+happens, Synapse will refuse to start.
+
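+For example, with the configuration below, `first_module.ExampleCallbacks` takes
+priority over `second_module.ExampleCallbacks` whenever both register the same callback
+(the module names and options here are purely illustrative):
+
+```yaml
+modules:
+  - module: first_module.ExampleCallbacks
+    config:
+      example_option: "a"
+  - module: second_module.ExampleCallbacks
+    config:
+      example_option: "b"
+```
+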
+## Current status
-Also note that we are currently in the process of migrating module interfaces to this
-system. While some interfaces might be compatible with it, others still require
-configuring modules in another part of Synapse's configuration file.
+We are currently in the process of migrating module interfaces to this system. While some
+interfaces might be compatible with it, others still require configuring modules in
+another part of Synapse's configuration file.
Currently, only the following pre-existing interfaces are compatible with this new system:
* spam checker
* third-party rules
* presence router
+* password auth providers
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index 36417dd39e..9dddfdfaaa 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -8,6 +8,8 @@ registered by using the Module API's `register_password_auth_provider_callbacks`
### `auth_checkers`
+_First introduced in Synapse v1.46.0_
+
```
auth_checkers: Dict[Tuple[str,Tuple], Callable]
```
@@ -44,8 +46,19 @@ instead.
If the authentication is unsuccessful, the module must return `None`.
+If multiple modules register an auth checker for the same login type but with different
+fields, Synapse will refuse to start.
+
+If multiple modules register an auth checker for the same login type with the same fields,
+then the callbacks will be executed in order, until one returns a Matrix User ID (and
+optionally a callback). In that case, the return value of that callback will be accepted
+and subsequent callbacks will not be fired. If every callback returns `None`, then the
+authentication fails.
+
### `check_3pid_auth`
+_First introduced in Synapse v1.46.0_
+
```python
async def check_3pid_auth(
medium: str,
@@ -67,10 +80,18 @@ If the authentication is successful, the module must return the user's Matrix ID
`@alice:example.com`) and optionally a callback to be called with the response to the `/login` request.
If the module doesn't wish to return a callback, it must return None instead.
-If the authentication is unsuccessful, the module must return None.
+If the authentication is unsuccessful, the module must return `None`.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `None`, Synapse falls through to the next one. The value of the first
+callback that does not return `None` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback. If every callback returns `None`,
+the authentication is denied.
### `on_logged_out`
+_First introduced in Synapse v1.46.0_
+
```python
async def on_logged_out(
user_id: str,
@@ -82,6 +103,8 @@ Called during a logout request for a user. It is passed the qualified user ID, t
deactivated device (if any: access tokens are occasionally created without an associated
device ID), and the (now deactivated) access token.
+If multiple modules implement this callback, Synapse runs them all in order.
+
## Example
The example module below implements authentication checkers for two different login types:
diff --git a/docs/modules/presence_router_callbacks.md b/docs/modules/presence_router_callbacks.md
index 4abcc9af47..d3da25cef4 100644
--- a/docs/modules/presence_router_callbacks.md
+++ b/docs/modules/presence_router_callbacks.md
@@ -10,6 +10,8 @@ The available presence router callbacks are:
### `get_users_for_states`
+_First introduced in Synapse v1.42.0_
+
```python
async def get_users_for_states(
state_updates: Iterable["synapse.api.UserPresenceState"],
@@ -24,8 +26,14 @@ must return a dictionary that maps from Matrix user IDs (which can be local or r
Synapse will then attempt to send the specified presence updates to each user when possible.
+If multiple modules implement this callback, Synapse merges all the dictionaries returned
+by the callbacks. If multiple callbacks return a dictionary containing the same key,
+Synapse takes the union of the sets associated with this key from each dictionary.
+
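+As a rough sketch of this merging behaviour (illustrative, not Synapse's actual
+implementation):
+
+```python
+def merge_callback_results(results):
+    # Merge dicts of user ID -> set of presence updates, taking the union of
+    # the sets whenever several callbacks return the same key.
+    merged = {}
+    for result in results:
+        for user_id, states in result.items():
+            merged.setdefault(user_id, set()).update(states)
+    return merged
+
+
+first = {"@alice:example.com": {"update_a"}}
+second = {"@alice:example.com": {"update_b"}, "@bob:example.com": {"update_c"}}
+assert merge_callback_results([first, second]) == {
+    "@alice:example.com": {"update_a", "update_b"},
+    "@bob:example.com": {"update_c"},
+}
+```
+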
### `get_interested_users`
+_First introduced in Synapse v1.42.0_
+
```python
async def get_interested_users(
user_id: str
@@ -44,6 +52,12 @@ query. The returned users can be local or remote.
Alternatively the callback can return `synapse.module_api.PRESENCE_ALL_USERS`
to indicate that the user should receive updates from all known users.
+If multiple modules implement this callback, they will be considered in order. Synapse
+calls each callback one by one, and uses the union of all the `set`s returned by the
+callbacks. If one callback returns `synapse.module_api.PRESENCE_ALL_USERS`, Synapse uses
+this value instead. If this happens, Synapse does not call any of the subsequent
+implementations of this callback.
+
## Example
The example below is a module that implements both presence router callbacks, and ensures
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 787e99074a..534ea196e0 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -10,6 +10,8 @@ The available spam checker callbacks are:
### `check_event_for_spam`
+_First introduced in Synapse v1.37.0_
+
```python
async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
```
@@ -19,8 +21,15 @@ either a `bool` to indicate whether the event must be rejected because of spam,
to indicate the event must be rejected because of spam and to give a rejection reason to
forward to clients.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
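+As a rough sketch of this fall-through behaviour (illustrative, not Synapse's actual
+implementation; the `True`-based callbacks below work the same way, with `True` as the
+pass-through value):
+
+```python
+async def run_check_event_for_spam(callbacks, event):
+    for callback in callbacks:  # callbacks are considered in module order
+        result = await callback(event)
+        if result is not False:
+            # The first non-False value wins; later callbacks are skipped.
+            return result
+    return False
+```
+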
### `user_may_join_room`
+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
```
@@ -34,8 +43,15 @@ currently has a pending invite in the room.
This callback isn't called if the join is performed by a server administrator, or in the
context of a room creation.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_invite`
+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool
```
@@ -44,8 +60,15 @@ Called when processing an invitation. The module must return a `bool` indicating
the inviter can invite the invitee to the given room. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_send_3pid_invite`
+_First introduced in Synapse v1.45.0_
+
```python
async def user_may_send_3pid_invite(
inviter: str,
@@ -79,8 +102,15 @@ await user_may_send_3pid_invite(
**Note**: If the third-party identifier is already associated with a matrix user ID,
[`user_may_invite`](#user_may_invite) will be used instead.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_create_room`
+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_create_room(user: str) -> bool
```
@@ -88,8 +118,15 @@ async def user_may_create_room(user: str) -> bool
Called when processing a room creation request. The module must return a `bool` indicating
whether the given user (represented by their Matrix user ID) is allowed to create a room.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_create_room_with_invites`
+_First introduced in Synapse v1.44.0_
+
```python
async def user_may_create_room_with_invites(
user: str,
@@ -117,8 +154,15 @@ corresponding list(s) will be empty.
since no invites are sent when cloning a room. To cover this case, modules also need to
implement `user_may_create_room`.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_create_room_alias`
+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool
```
@@ -127,8 +171,15 @@ Called when trying to associate an alias with an existing room. The module must
`bool` indicating whether the given user (represented by their Matrix user ID) is allowed
to set the given alias.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `user_may_publish_room`
+_First introduced in Synapse v1.37.0_
+
```python
async def user_may_publish_room(user: str, room_id: str) -> bool
```
@@ -137,8 +188,15 @@ Called when trying to publish a room to the homeserver's public rooms directory.
module must return a `bool` indicating whether the given user (represented by their
Matrix user ID) is allowed to publish the given room.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `check_username_for_spam`
+_First introduced in Synapse v1.37.0_
+
```python
async def check_username_for_spam(user_profile: Dict[str, str]) -> bool
```
@@ -154,8 +212,15 @@ is represented as a dictionary with the following keys:
The module is given a copy of the original dictionary, so modifying it from within the
module cannot modify a user's profile when included in user directory search results.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `check_registration_for_spam`
+_First introduced in Synapse v1.37.0_
+
```python
async def check_registration_for_spam(
email_threepid: Optional[dict],
@@ -179,8 +244,16 @@ The arguments passed to this callback are:
used during the registration process.
* `auth_provider_id`: The identifier of the SSO authentication provider, if any.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `RegistrationBehaviour.ALLOW`, Synapse falls through to the next one.
+The value of the first callback that does not return `RegistrationBehaviour.ALLOW` will
+be used. If this happens, Synapse will not call any of the subsequent implementations of
+this callback.
+
### `check_media_file_for_spam`
+_First introduced in Synapse v1.37.0_
+
```python
async def check_media_file_for_spam(
file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper",
@@ -191,6 +264,11 @@ async def check_media_file_for_spam(
Called when storing a local or remote file. The module must return a boolean indicating
whether the given file can be stored in the homeserver's media store.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `False`, Synapse falls through to the next one. The value of the first
+callback that does not return `False` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
## Example
The example below is a module that implements the spam checker callback
diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md
index 2ba6f39453..034923da0f 100644
--- a/docs/modules/third_party_rules_callbacks.md
+++ b/docs/modules/third_party_rules_callbacks.md
@@ -10,6 +10,8 @@ The available third party rules callbacks are:
### `check_event_allowed`
+_First introduced in Synapse v1.39.0_
+
```python
async def check_event_allowed(
event: "synapse.events.EventBase",
@@ -44,8 +46,15 @@ dictionary, and modify the returned dictionary accordingly.
Note that replacing the event only works for events sent by local users, not for events
received over federation.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `on_create_room`
+_First introduced in Synapse v1.39.0_
+
```python
async def on_create_room(
requester: "synapse.types.Requester",
@@ -63,8 +72,16 @@ the request is a server admin.
Modules can modify the `request_content` (by e.g. adding events to its `initial_state`),
or deny the room's creation by raising a `module_api.errors.SynapseError`.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns without raising an exception, Synapse falls through to the next one. The
+room creation will be forbidden as soon as one of the callbacks raises an exception. If
+this happens, Synapse will not call any of the subsequent implementations of this
+callback.
+
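+For instance, a callback could deny room creation as follows (a hedged sketch; the
+policy and error message are illustrative):
+
+```python
+from synapse.module_api import errors
+
+
+async def on_create_room(requester, request_content, is_requester_admin):
+    # Deny room creation for non-admins (an illustrative policy).
+    if not is_requester_admin:
+        raise errors.SynapseError(403, "Room creation is restricted to admins")
+```
+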
### `check_threepid_can_be_invited`
+_First introduced in Synapse v1.39.0_
+
```python
async def check_threepid_can_be_invited(
medium: str,
@@ -76,8 +93,15 @@ async def check_threepid_can_be_invited(
Called when processing an invite via a third-party identifier (i.e. email or phone number).
The module must return a boolean indicating whether the invite can go through.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
### `check_visibility_can_be_modified`
+_First introduced in Synapse v1.39.0_
+
```python
async def check_visibility_can_be_modified(
room_id: str,
@@ -90,6 +114,11 @@ Called when changing the visibility of a room in the local public room directory
visibility is a string that's either "public" or "private". The module must return a
boolean indicating whether the change can go through.
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `True`, Synapse falls through to the next one. The value of the first
+callback that does not return `True` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback.
+
## Example
The example below is a module that implements the third-party rules callback
diff --git a/docs/modules/writing_a_module.md b/docs/modules/writing_a_module.md
index 4f2fec8dc9..7764e06692 100644
--- a/docs/modules/writing_a_module.md
+++ b/docs/modules/writing_a_module.md
@@ -12,6 +12,21 @@ configuration associated with the module in Synapse's configuration file.
See the documentation for the `ModuleApi` class
[here](https://github.com/matrix-org/synapse/blob/master/synapse/module_api/__init__.py).
+## When Synapse runs with several modules configured
+
+If Synapse is running with several modules configured, the order in which each module
+appears within the `modules` section of the Synapse configuration file might restrict
+what it can register. See [this section](index.html#using-multiple-modules) for more
+information.
+
+On top of the rules listed in the link above, if a callback returns a value that should
+cause the current operation to fail (e.g. if a callback checking an event returns a
+value that should cause the event to be denied), Synapse will fail the operation and
+ignore any subsequent callbacks that would have been run after this one.
+
+The documentation for each callback mentions how Synapse behaves when
+multiple modules implement it.
+
## Handling the module's configuration
A module can implement the following static method:
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 7bfaed483b..b90ed62d61 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -472,6 +472,48 @@ limit_remote_rooms:
#
#user_ips_max_age: 14d
+# Inhibits the /requestToken endpoints from returning an error that might leak
+# information about whether an e-mail address is in use or not on this
+# homeserver.
+# Note that for some endpoints the error situation is the e-mail already being
+# used, and for others the error is the e-mail being unused.
+# If this option is enabled, instead of returning an error, these endpoints will
+# act as if no error happened and return a fake session ID ('sid') to clients.
+#
+#request_token_inhibit_3pid_errors: true
+
+# A list of domains that the domain portion of 'next_link' parameters
+# must match.
+#
+# This parameter is optionally provided by clients while requesting
+# validation of an email or phone number, and maps to a link that
+# users will be automatically redirected to after validation
+# succeeds. Clients can make use of this parameter to aid the validation
+# process.
+#
+# The whitelist is applied whether the homeserver or an
+# identity server is handling validation.
+#
+# The default value is no whitelist functionality; all domains are
+# allowed. Setting this value to an empty list will instead disallow
+# all domains.
+#
+#next_link_domain_whitelist: ["matrix.org"]
+
+# Templates to use when generating email or HTML page contents.
+#
+templates:
+ # Directory in which Synapse will try to find template files to use to generate
+ # email or HTML page contents.
+ # If not set, or a file is not found within the template directory, a default
+ # template from within the Synapse package will be used.
+ #
+ # See https://matrix-org.github.io/synapse/latest/templates.html for more
+ # information about using custom templates.
+ #
+ #custom_template_directory: /path/to/custom/templates/
+
+
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
@@ -541,47 +583,6 @@ retention:
# - shortest_max_lifetime: 3d
# interval: 1d
-# Inhibits the /requestToken endpoints from returning an error that might leak
-# information about whether an e-mail address is in use or not on this
-# homeserver.
-# Note that for some endpoints the error situation is the e-mail already being
-# used, and for others the error is entering the e-mail being unused.
-# If this option is enabled, instead of returning an error, these endpoints will
-# act as if no error happened and return a fake session ID ('sid') to clients.
-#
-#request_token_inhibit_3pid_errors: true
-
-# A list of domains that the domain portion of 'next_link' parameters
-# must match.
-#
-# This parameter is optionally provided by clients while requesting
-# validation of an email or phone number, and maps to a link that
-# users will be automatically redirected to after validation
-# succeeds. Clients can make use this parameter to aid the validation
-# process.
-#
-# The whitelist is applied whether the homeserver or an
-# identity server is handling validation.
-#
-# The default value is no whitelist functionality; all domains are
-# allowed. Setting this value to an empty list will instead disallow
-# all domains.
-#
-#next_link_domain_whitelist: ["matrix.org"]
-
-# Templates to use when generating email or HTML page contents.
-#
-templates:
- # Directory in which Synapse will try to find template files to use to generate
- # email or HTML page contents.
- # If not set, or a file is not found within the template directory, a default
- # template from within the Synapse package will be used.
- #
- # See https://matrix-org.github.io/synapse/latest/templates.html for more
- # information about using custom templates.
- #
- #custom_template_directory: /path/to/custom/templates/
-
## TLS ##
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 8de96cb3e7..c47eef1a20 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -348,24 +348,24 @@ Please ensure your Application Services are up to date.
## Requirement for X-Forwarded-Proto header
When using Synapse with a reverse proxy (in particular, when using the
-[x_forwarded]{.title-ref} option on an HTTP listener), Synapse now
-expects to receive an [X-Forwarded-Proto]{.title-ref} header on incoming
+`x_forwarded` option on an HTTP listener), Synapse now
+expects to receive an `X-Forwarded-Proto` header on incoming
HTTP requests. If it is not set, Synapse will log a warning on each
received request.
To avoid the warning, administrators using a reverse proxy should ensure
-that the reverse proxy sets [X-Forwarded-Proto]{.title-ref} header to
-[https]{.title-ref} or [http]{.title-ref} to indicate the protocol used
+that the reverse proxy sets the `X-Forwarded-Proto` header to
+`https` or `http` to indicate the protocol used
by the client.
-Synapse also requires the [Host]{.title-ref} header to be preserved.
+Synapse also requires the `Host` header to be preserved.
See the [reverse proxy documentation](reverse_proxy.md), where the
example configurations have been updated to show how to set these
headers.
(Users of [Caddy](https://caddyserver.com/) are unaffected, since we
-believe it sets [X-Forwarded-Proto]{.title-ref} by default.)
+believe it sets `X-Forwarded-Proto` by default.)
# Upgrading to v1.27.0
@@ -529,13 +529,13 @@ mapping provider to specify different algorithms, instead of the
way](<https://matrix.org/docs/spec/appendices#mapping-from-other-character-sets>).
If your Synapse configuration uses a custom mapping provider
-([oidc_config.user_mapping_provider.module]{.title-ref} is specified and
+(`oidc_config.user_mapping_provider.module` is specified and
not equal to
-[synapse.handlers.oidc_handler.JinjaOidcMappingProvider]{.title-ref})
-then you *must* ensure that [map_user_attributes]{.title-ref} of the
+`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`)
+then you *must* ensure that `map_user_attributes` of the
mapping provider performs some normalisation of the
-[localpart]{.title-ref} returned. To match previous behaviour you can
-use the [map_username_to_mxid_localpart]{.title-ref} function provided
+`localpart` returned. To match previous behaviour you can
+use the `map_username_to_mxid_localpart` function provided
by Synapse. An example is shown below:
```python
@@ -564,7 +564,7 @@ v1.24.0. The Admin API is now only accessible under:
- `/_synapse/admin/v1`
-The only exception is the [/admin/whois]{.title-ref} endpoint, which is
+The only exception is the `/admin/whois` endpoint, which is
[also available via the client-server
API](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
@@ -639,7 +639,7 @@ This page will appear to the user after clicking a password reset link
that has been emailed to them.
To complete password reset, the page must include a way to make a
-[POST]{.title-ref} request to
+`POST` request to
`/_synapse/client/password_reset/{medium}/submit_token` with the query
parameters from the original link, presented as a URL-encoded form. See
the file itself for more details.
@@ -660,18 +660,18 @@ but the parameters are slightly different:
# Upgrading to v1.18.0
-## Docker [-py3]{.title-ref} suffix will be removed in future versions
+## Docker `-py3` suffix will be removed in future versions
From 10th August 2020, we will no longer publish Docker images with the
-[-py3]{.title-ref} tag suffix. The images tagged with the
-[-py3]{.title-ref} suffix have been identical to the non-suffixed tags
+`-py3` tag suffix. The images tagged with the
+`-py3` suffix have been identical to the non-suffixed tags
since release 0.99.0, and the suffix is obsolete.
-On 10th August, we will remove the [latest-py3]{.title-ref} tag.
-Existing per-release tags (such as [v1.18.0-py3]{.title-ref}) will not
-be removed, but no new [-py3]{.title-ref} tags will be added.
+On 10th August, we will remove the `latest-py3` tag.
+Existing per-release tags (such as `v1.18.0-py3`) will not
+be removed, but no new `-py3` tags will be added.
-Scripts relying on the [-py3]{.title-ref} suffix will need to be
+Scripts relying on the `-py3` suffix will need to be
updated.
## Redis replication is now recommended in lieu of TCP replication
@@ -705,8 +705,8 @@ This will *not* be a problem for Synapse installations which were:
If completeness of the room directory is a concern, installations which
are affected can be repaired as follows:
-1. Run the following sql from a [psql]{.title-ref} or
- [sqlite3]{.title-ref} console:
+1. Run the following sql from a `psql` or
+ `sqlite3` console:
```sql
INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES
@@ -770,8 +770,8 @@ participating in many rooms.
of any problems.
1. As an initial check to see if you will be affected, you can try
- running the following query from the [psql]{.title-ref} or
- [sqlite3]{.title-ref} console. It is safe to run it while Synapse is
+ running the following query from the `psql` or
+ `sqlite3` console. It is safe to run it while Synapse is
still running.
```sql
@@ -1353,9 +1353,9 @@ first need to upgrade the database by running:
python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
-Where [<db>]{.title-ref} is the location of the database,
-[<server_name>]{.title-ref} is the server name as specified in the
-synapse configuration, and [<signing_key>]{.title-ref} is the location
+Where `<db>` is the location of the database,
+`<server_name>` is the server name as specified in the
+synapse configuration, and `<signing_key>` is the location
of the signing key as specified in the synapse configuration.
This may take some time to complete. Failures of signatures and content
diff --git a/docs/usage/administration/admin_api/registration_tokens.md b/docs/usage/administration/admin_api/registration_tokens.md
index c48d060dcc..13d5eb75e9 100644
--- a/docs/usage/administration/admin_api/registration_tokens.md
+++ b/docs/usage/administration/admin_api/registration_tokens.md
@@ -149,7 +149,7 @@ POST /_synapse/admin/v1/registration_tokens/new
The request body must be a JSON object and can contain the following fields:
- `token`: The registration token. A string of no more than 64 characters that
- consists only of characters matched by the regex `[A-Za-z0-9-_]`.
+ consists only of characters matched by the regex `[A-Za-z0-9._~-]`.
Default: randomly generated.
- `uses_allowed`: The integer number of times the token can be used to complete
a registration before it becomes invalid.
diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md
index a673d487b8..499ab7cfe5 100644
--- a/docs/usage/configuration/logging_sample_config.md
+++ b/docs/usage/configuration/logging_sample_config.md
@@ -2,13 +2,13 @@
Below is a sample logging configuration file. This file can be tweaked to control how your
homeserver will output logs. A restart of the server is generally required to apply any
-changes made to this file.
+changes made to this file. The value of the `log_config` option in your homeserver
+config should be the path to this file.
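+
+For example, in `homeserver.yaml` (the path is illustrative):
+
+```yaml
+log_config: "/data/homeserver.log.config"
+```
+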
-Note that the contents below are *not* intended to be copied and used as the basis for
-a real homeserver.yaml. Instead, if you are starting from scratch, please generate
-a fresh config using Synapse by following the instructions in
-[Installation](../../setup/installation.md).
+Note that a default logging configuration (shown below) is created automatically alongside
+the homeserver config when following the [installation instructions](../../setup/installation.md).
+It should be named `<SERVERNAME>.log.config` by default.
```yaml
{{#include ../../sample_log_config.yaml}}
-```
\ No newline at end of file
+```
diff --git a/mypy.ini b/mypy.ini
index cb4489eb37..14d8bb8eaf 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -100,6 +100,9 @@ files =
tests/util/test_itertools.py,
tests/util/test_stream_change_cache.py
+[mypy-synapse.api.*]
+disallow_untyped_defs = True
+
[mypy-synapse.events.*]
disallow_untyped_defs = True
diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json
index 4a43d3f2b0..6ac55ef2f7 100755
--- a/scripts-dev/sign_json
+++ b/scripts-dev/sign_json
@@ -52,12 +52,18 @@ Example usage:
)
parser.add_argument(
+ "-K",
+ "--signing-key",
+ help="The private ed25519 key to sign the request with.",
+ )
+
+ parser.add_argument(
"-c",
"--config",
default="homeserver.yaml",
help=(
"Path to synapse config file, from which the server name and/or signing "
- "key path will be read. Ignored if --server-name and --signing-key-path "
+ "key path will be read. Ignored if --server-name and --signing-key(-path) "
"are both given."
),
)
@@ -87,11 +93,14 @@ Example usage:
args = parser.parse_args()
- if not args.server_name or not args.signing_key_path:
+ if not args.server_name or not (args.signing_key_path or args.signing_key):
read_args_from_config(args)
- with open(args.signing_key_path) as f:
- key = read_signing_keys(f)[0]
+ if args.signing_key:
+ keys = read_signing_keys([args.signing_key])
+ else:
+ with open(args.signing_key_path) as f:
+ keys = read_signing_keys(f)
json_to_sign = args.input_data
if json_to_sign is None:
@@ -107,7 +116,7 @@ Example usage:
print("Input json was not an object", file=sys.stderr)
sys.exit(1)
- sign_json(obj, args.server_name, key)
+ sign_json(obj, args.server_name, keys[0])
for c in json_encoder.iterencode(obj):
args.output.write(c)
args.output.write("\n")
@@ -118,8 +127,17 @@ def read_args_from_config(args: argparse.Namespace) -> None:
config = yaml.safe_load(fh)
if not args.server_name:
args.server_name = config["server_name"]
- if not args.signing_key_path:
- args.signing_key_path = config["signing_key_path"]
+ if not args.signing_key_path and not args.signing_key:
+ if "signing_key" in config:
+ args.signing_key = config["signing_key"]
+ elif "signing_key_path" in config:
+ args.signing_key_path = config["signing_key_path"]
+ else:
+ print(
+ "A signing key must be given on the commandline or in the config file.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
if __name__ == "__main__":
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index e6ca9232ee..44883c6663 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -245,7 +245,7 @@ class Auth:
async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str
- ):
+ ) -> None:
"""Validates that the app service is allowed to control
the given user.
@@ -618,5 +618,13 @@ class Auth:
% (user_id, room_id),
)
- async def check_auth_blocking(self, *args, **kwargs) -> None:
- await self._auth_blocking.check_auth_blocking(*args, **kwargs)
+ async def check_auth_blocking(
+ self,
+ user_id: Optional[str] = None,
+ threepid: Optional[dict] = None,
+ user_type: Optional[str] = None,
+ requester: Optional[Requester] = None,
+ ) -> None:
+ await self._auth_blocking.check_auth_blocking(
+ user_id=user_id, threepid=threepid, user_type=user_type, requester=requester
+ )
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 9480f448d7..685d1c25cf 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -18,7 +18,7 @@
import logging
import typing
from http import HTTPStatus
-from typing import Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
from twisted.web import http
@@ -143,7 +143,7 @@ class SynapseError(CodeMessageException):
super().__init__(code, msg)
self.errcode = errcode
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode)
@@ -175,7 +175,7 @@ class ProxiedRequestError(SynapseError):
else:
self._additional_fields = dict(additional_fields)
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, **self._additional_fields)
@@ -196,7 +196,7 @@ class ConsentNotGivenError(SynapseError):
)
self._consent_uri = consent_uri
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
@@ -262,14 +262,10 @@ class InteractiveAuthIncompleteError(Exception):
class UnrecognizedRequestError(SynapseError):
"""An error indicating we don't understand the request you're trying to make"""
- def __init__(self, *args, **kwargs):
- if "errcode" not in kwargs:
- kwargs["errcode"] = Codes.UNRECOGNIZED
- if len(args) == 0:
- message = "Unrecognized request"
- else:
- message = args[0]
- super().__init__(400, message, **kwargs)
+ def __init__(
+ self, msg: str = "Unrecognized request", errcode: str = Codes.UNRECOGNIZED
+ ):
+ super().__init__(400, msg, errcode)
class NotFoundError(SynapseError):
@@ -284,10 +280,8 @@ class AuthError(SynapseError):
other poorly-defined times.
"""
- def __init__(self, *args, **kwargs):
- if "errcode" not in kwargs:
- kwargs["errcode"] = Codes.FORBIDDEN
- super().__init__(*args, **kwargs)
+ def __init__(self, code: int, msg: str, errcode: str = Codes.FORBIDDEN):
+ super().__init__(code, msg, errcode)
class InvalidClientCredentialsError(SynapseError):
@@ -321,7 +315,7 @@ class InvalidClientTokenError(InvalidClientCredentialsError):
super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
self._soft_logout = soft_logout
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
d = super().error_dict()
d["soft_logout"] = self._soft_logout
return d
@@ -345,7 +339,7 @@ class ResourceLimitError(SynapseError):
self.limit_type = limit_type
super().__init__(code, msg, errcode=errcode)
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(
self.msg,
self.errcode,
@@ -357,32 +351,17 @@ class ResourceLimitError(SynapseError):
class EventSizeError(SynapseError):
"""An error raised when an event is too big."""
- def __init__(self, *args, **kwargs):
- if "errcode" not in kwargs:
- kwargs["errcode"] = Codes.TOO_LARGE
- super().__init__(413, *args, **kwargs)
-
-
-class EventStreamError(SynapseError):
- """An error raised when there a problem with the event stream."""
-
- def __init__(self, *args, **kwargs):
- if "errcode" not in kwargs:
- kwargs["errcode"] = Codes.BAD_PAGINATION
- super().__init__(*args, **kwargs)
+ def __init__(self, msg: str):
+ super().__init__(413, msg, Codes.TOO_LARGE)
class LoginError(SynapseError):
"""An error raised when there was a problem logging in."""
- pass
-
class StoreError(SynapseError):
"""An error raised when there was a problem storing some data."""
- pass
-
class InvalidCaptchaError(SynapseError):
def __init__(
@@ -395,7 +374,7 @@ class InvalidCaptchaError(SynapseError):
super().__init__(code, msg, errcode)
self.error_url = error_url
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, error_url=self.error_url)
@@ -412,7 +391,7 @@ class LimitExceededError(SynapseError):
super().__init__(code, msg, errcode)
self.retry_after_ms = retry_after_ms
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, retry_after_ms=self.retry_after_ms)
@@ -443,10 +422,8 @@ class UnsupportedRoomVersionError(SynapseError):
class ThreepidValidationError(SynapseError):
"""An error raised when there was a problem authorising an event."""
- def __init__(self, *args, **kwargs):
- if "errcode" not in kwargs:
- kwargs["errcode"] = Codes.FORBIDDEN
- super().__init__(*args, **kwargs)
+ def __init__(self, msg: str, errcode: str = Codes.FORBIDDEN):
+ super().__init__(400, msg, errcode)
class IncompatibleRoomVersionError(SynapseError):
@@ -466,7 +443,7 @@ class IncompatibleRoomVersionError(SynapseError):
self._room_version = room_version
- def error_dict(self):
+ def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, room_version=self._room_version)
@@ -494,7 +471,7 @@ class RequestSendFailed(RuntimeError):
errors (like programming errors).
"""
- def __init__(self, inner_exception, can_retry):
+ def __init__(self, inner_exception: BaseException, can_retry: bool):
super().__init__(
"Failed to send request: %s: %s"
% (type(inner_exception).__name__, inner_exception)
@@ -503,7 +480,7 @@ class RequestSendFailed(RuntimeError):
self.can_retry = can_retry
-def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs):
+def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server
interactions.
@@ -551,7 +528,7 @@ class FederationError(RuntimeError):
msg = "%s %s: %s" % (level, code, reason)
super().__init__(msg)
- def get_dict(self):
+ def get_dict(self) -> "JsonDict":
return {
"level": self.level,
"code": self.code,
@@ -580,7 +557,7 @@ class HttpResponseException(CodeMessageException):
super().__init__(code, msg)
self.response = response
- def to_synapse_error(self):
+ def to_synapse_error(self) -> SynapseError:
"""Make a SynapseError based on an HTTPResponseException
This is useful when a proxied request has failed, and we need to
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 20e91a115d..bc550ae646 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -231,24 +231,24 @@ class FilterCollection:
def include_redundant_members(self) -> bool:
return self._room_state_filter.include_redundant_members()
- def filter_presence(self, events):
+ def filter_presence(
+ self, events: Iterable[UserPresenceState]
+ ) -> List[UserPresenceState]:
return self._presence_filter.filter(events)
- def filter_account_data(self, events):
+ def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return self._account_data.filter(events)
- def filter_room_state(self, events):
+ def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]:
return self._room_state_filter.filter(self._room_filter.filter(events))
- def filter_room_timeline(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
+ def filter_room_timeline(self, events: Iterable[EventBase]) -> List[EventBase]:
return self._room_timeline_filter.filter(self._room_filter.filter(events))
- def filter_room_ephemeral(self, events: Iterable[FilterEvent]) -> List[FilterEvent]:
+ def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return self._room_ephemeral_filter.filter(self._room_filter.filter(events))
- def filter_room_account_data(
- self, events: Iterable[FilterEvent]
- ) -> List[FilterEvent]:
+ def filter_room_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]:
return self._room_account_data.filter(self._room_filter.filter(events))
def blocks_all_presence(self) -> bool:
@@ -309,7 +309,7 @@ class Filter:
# except for presence which actually gets passed around as its own
# namedtuple type.
if isinstance(event, UserPresenceState):
- sender = event.user_id
+ sender: Optional[str] = event.user_id
room_id = None
ev_type = "m.presence"
contains_url = False
diff --git a/synapse/api/presence.py b/synapse/api/presence.py
index a3bf0348d1..b80aa83cb3 100644
--- a/synapse/api/presence.py
+++ b/synapse/api/presence.py
@@ -12,49 +12,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from collections import namedtuple
+from typing import Any, Optional
+
+import attr
from synapse.api.constants import PresenceState
+from synapse.types import JsonDict
-class UserPresenceState(
- namedtuple(
- "UserPresenceState",
- (
- "user_id",
- "state",
- "last_active_ts",
- "last_federation_update_ts",
- "last_user_sync_ts",
- "status_msg",
- "currently_active",
- ),
- )
-):
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class UserPresenceState:
"""Represents the current presence state of the user.
- user_id (str)
- last_active (int): Time in msec that the user last interacted with server.
- last_federation_update (int): Time in msec since either a) we sent a presence
+ user_id
+    last_active: Time in msec that the user last interacted with the server.
+ last_federation_update: Time in msec since either a) we sent a presence
update to other servers or b) we received a presence update, depending
on if is a local user or not.
- last_user_sync (int): Time in msec that the user last *completed* a sync
+ last_user_sync: Time in msec that the user last *completed* a sync
(or event stream).
- status_msg (str): User set status message.
+ status_msg: User set status message.
"""
- def as_dict(self):
- return dict(self._asdict())
+ user_id: str
+ state: str
+ last_active_ts: int
+ last_federation_update_ts: int
+ last_user_sync_ts: int
+ status_msg: Optional[str]
+ currently_active: bool
+
+ def as_dict(self) -> JsonDict:
+ return attr.asdict(self)
@staticmethod
- def from_dict(d):
+ def from_dict(d: JsonDict) -> "UserPresenceState":
return UserPresenceState(**d)
- def copy_and_replace(self, **kwargs):
- return self._replace(**kwargs)
+ def copy_and_replace(self, **kwargs: Any) -> "UserPresenceState":
+ return attr.evolve(self, **kwargs)
@classmethod
- def default(cls, user_id):
+ def default(cls, user_id: str) -> "UserPresenceState":
"""Returns a default presence state."""
return cls(
user_id=user_id,
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index e8964097d3..849c18ceda 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -161,7 +161,7 @@ class Ratelimiter:
return allowed, time_allowed
- def _prune_message_counts(self, time_now_s: float):
+ def _prune_message_counts(self, time_now_s: float) -> None:
"""Remove message count entries that have not exceeded their defined
rate_hz limit
@@ -190,7 +190,7 @@ class Ratelimiter:
update: bool = True,
n_actions: int = 1,
_time_now_s: Optional[float] = None,
- ):
+ ) -> None:
"""Checks if an action can be performed. If not, raises a LimitExceededError
Checks if the user has ratelimiting disabled in the database by looking
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 032c69b210..6e84b1524f 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -19,6 +19,7 @@ from hashlib import sha256
from urllib.parse import urlencode
from synapse.config import ConfigError
+from synapse.config.homeserver import HomeServerConfig
SYNAPSE_CLIENT_API_PREFIX = "/_synapse/client"
CLIENT_API_PREFIX = "/_matrix/client"
@@ -34,11 +35,7 @@ LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
class ConsentURIBuilder:
- def __init__(self, hs_config):
- """
- Args:
- hs_config (synapse.config.homeserver.HomeServerConfig):
- """
+ def __init__(self, hs_config: HomeServerConfig):
if hs_config.key.form_secret is None:
raise ConfigError("form_secret not set in config")
if hs_config.server.public_baseurl is None:
@@ -47,15 +44,15 @@ class ConsentURIBuilder:
self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
self._public_baseurl = hs_config.server.public_baseurl
- def build_user_consent_uri(self, user_id):
+ def build_user_consent_uri(self, user_id: str) -> str:
"""Build a URI which we can give to the user to do their privacy
policy consent
Args:
- user_id (str): mxid or username of user
+ user_id: mxid or username of user
Returns
- (str) the URI where the user can do consent
+ The URI where the user can do consent
"""
mac = hmac.new(
key=self._hmac_secret, msg=user_id.encode("ascii"), digestmod=sha256
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 13d20af457..b156b93bf3 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -39,6 +39,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.server import HomeServer
+from synapse.storage.databases.main.room import RoomWorkerStore
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string
@@ -58,6 +59,7 @@ class AdminCmdSlavedStore(
SlavedEventStore,
SlavedClientIpStore,
BaseSlavedStore,
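+ # RoomWorkerStore is needed so the `export-data` admin command can read
+ # room data (see the CI test added in this change).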
+ RoomWorkerStore,
):
pass
@@ -185,11 +187,7 @@ def start(config_options):
# a full worker config.
config.worker.worker_app = "synapse.app.admin_cmd"
- if (
- not config.worker.worker_daemonize
- and not config.worker.worker_log_file
- and not config.worker.worker_log_config
- ):
+ if not config.worker.worker_daemonize and not config.worker.worker_log_config:
# Since we're meant to be run as a "command" let's not redirect stdio
# unless we've actually set log config.
config.logging.no_redirect_stdio = True
@@ -198,9 +196,9 @@ def start(config_options):
config.server.update_user_directory = False
config.worker.run_background_tasks = False
config.worker.start_pushers = False
- config.pusher_shard_config.instances = []
+ config.worker.pusher_shard_config.instances = []
config.worker.send_federation = False
- config.federation_shard_config.instances = []
+ config.worker.federation_shard_config.instances = []
synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts
@@ -221,7 +219,7 @@ def start(config_options):
async def run():
with LoggingContext("command"):
- _base.start(ss)
+ await _base.start(ss)
await args.func(ss, args)
_base.start_worker_reactor(
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi
index 06fbd1166b..c1d9069798 100644
--- a/synapse/config/_base.pyi
+++ b/synapse/config/_base.pyi
@@ -26,6 +26,7 @@ from synapse.config import (
redis,
registration,
repository,
+ retention,
room_directory,
saml2,
server,
@@ -91,6 +92,7 @@ class RootConfig:
modules: modules.ModulesConfig
caches: cache.CacheConfig
federation: federation.FederationConfig
+ retention: retention.RetentionConfig
config_classes: List = ...
def __init__(self) -> None: ...
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 7b0381c06a..b013a3918c 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -24,6 +24,9 @@ class ExperimentalConfig(Config):
def read_config(self, config: JsonDict, **kwargs):
experimental = config.get("experimental_features") or {}
+ # Whether to enable experimental MSC1849 (aka relations) support
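+ # (Read from the top level of the config rather than `experimental_features`,
+ # to stay compatible with the old server-config option of the same name.)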
+ self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
+
# MSC3026 (busy presence state)
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 442f1b9ac0..001605c265 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -38,6 +38,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
+from .retention import RetentionConfig
from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
@@ -59,6 +60,7 @@ class HomeServerConfig(RootConfig):
config_classes = [
ModulesConfig,
ServerConfig,
+ RetentionConfig,
TlsConfig,
FederationConfig,
CacheConfig,
diff --git a/synapse/config/retention.py b/synapse/config/retention.py
new file mode 100644
index 0000000000..aed9bf458f
--- /dev/null
+++ b/synapse/config/retention.py
@@ -0,0 +1,226 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import List, Optional
+
+import attr
+
+from synapse.config._base import Config, ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class RetentionPurgeJob:
+ """Object describing the configuration of the manhole"""
+
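+ # All fields are durations in milliseconds, as produced by Config.parse_duration.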
+ interval: int
+ shortest_max_lifetime: Optional[int]
+ longest_max_lifetime: Optional[int]
+
+
+class RetentionConfig(Config):
+ section = "retention"
+
+ def read_config(self, config, **kwargs):
+ retention_config = config.get("retention")
+ if retention_config is None:
+ retention_config = {}
+
+ self.retention_enabled = retention_config.get("enabled", False)
+
+ retention_default_policy = retention_config.get("default_policy")
+
+ if retention_default_policy is not None:
+ self.retention_default_min_lifetime = retention_default_policy.get(
+ "min_lifetime"
+ )
+ if self.retention_default_min_lifetime is not None:
+ self.retention_default_min_lifetime = self.parse_duration(
+ self.retention_default_min_lifetime
+ )
+
+ self.retention_default_max_lifetime = retention_default_policy.get(
+ "max_lifetime"
+ )
+ if self.retention_default_max_lifetime is not None:
+ self.retention_default_max_lifetime = self.parse_duration(
+ self.retention_default_max_lifetime
+ )
+
+ if (
+ self.retention_default_min_lifetime is not None
+ and self.retention_default_max_lifetime is not None
+ and (
+ self.retention_default_min_lifetime
+ > self.retention_default_max_lifetime
+ )
+ ):
+ raise ConfigError(
+ "The default retention policy's 'min_lifetime' can not be greater"
+ " than its 'max_lifetime'"
+ )
+ else:
+ self.retention_default_min_lifetime = None
+ self.retention_default_max_lifetime = None
+
+ if self.retention_enabled:
+ logger.info(
+ "Message retention policies support enabled with the following default"
+ " policy: min_lifetime = %s ; max_lifetime = %s",
+ self.retention_default_min_lifetime,
+ self.retention_default_max_lifetime,
+ )
+
+ self.retention_allowed_lifetime_min = retention_config.get(
+ "allowed_lifetime_min"
+ )
+ if self.retention_allowed_lifetime_min is not None:
+ self.retention_allowed_lifetime_min = self.parse_duration(
+ self.retention_allowed_lifetime_min
+ )
+
+ self.retention_allowed_lifetime_max = retention_config.get(
+ "allowed_lifetime_max"
+ )
+ if self.retention_allowed_lifetime_max is not None:
+ self.retention_allowed_lifetime_max = self.parse_duration(
+ self.retention_allowed_lifetime_max
+ )
+
+ if (
+ self.retention_allowed_lifetime_min is not None
+ and self.retention_allowed_lifetime_max is not None
+ and self.retention_allowed_lifetime_min
+ > self.retention_allowed_lifetime_max
+ ):
+ raise ConfigError(
+ "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
+ " greater than 'allowed_lifetime_max'"
+ )
+
+ self.retention_purge_jobs: List[RetentionPurgeJob] = []
+ for purge_job_config in retention_config.get("purge_jobs", []):
+ interval_config = purge_job_config.get("interval")
+
+ if interval_config is None:
+ raise ConfigError(
+ "A retention policy's purge jobs configuration must have the"
+ " 'interval' key set."
+ )
+
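+ # parse_duration accepts an integer (milliseconds) or a string with a
+ # unit suffix such as '30s', '12h' or '1d'.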
+ interval = self.parse_duration(interval_config)
+
+ shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
+
+ if shortest_max_lifetime is not None:
+ shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
+
+ longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
+
+ if longest_max_lifetime is not None:
+ longest_max_lifetime = self.parse_duration(longest_max_lifetime)
+
+ if (
+ shortest_max_lifetime is not None
+ and longest_max_lifetime is not None
+ and shortest_max_lifetime > longest_max_lifetime
+ ):
+ raise ConfigError(
+ "A retention policy's purge jobs configuration's"
+ " 'shortest_max_lifetime' value can not be greater than its"
+ " 'longest_max_lifetime' value."
+ )
+
+ self.retention_purge_jobs.append(
+ RetentionPurgeJob(interval, shortest_max_lifetime, longest_max_lifetime)
+ )
+
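+ # If no purge jobs are configured, default to a single daily job covering
+ # every lifetime range.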
+ if not self.retention_purge_jobs:
+ self.retention_purge_jobs = [
+ RetentionPurgeJob(self.parse_duration("1d"), None, None)
+ ]
+
+ def generate_config_section(self, config_dir_path, server_name, **kwargs):
+ return """\
+ # Message retention policy at the server level.
+ #
+ # Room admins and mods can define a retention period for their rooms using the
+ # 'm.room.retention' state event, and server admins can cap this period by setting
+ # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
+ #
+ # If this feature is enabled, Synapse will regularly look for and purge events
+ # which are older than the room's maximum retention period. Synapse will also
+ # filter events received over federation so that events that should have been
+ # purged are ignored and not stored again.
+ #
+ retention:
+ # The message retention policies feature is disabled by default. Uncomment the
+ # following line to enable it.
+ #
+ #enabled: true
+
+ # Default retention policy. If set, Synapse will apply it to rooms that lack the
+ # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
+ # matter much because Synapse doesn't take it into account yet.
+ #
+ #default_policy:
+ # min_lifetime: 1d
+ # max_lifetime: 1y
+
+ # Retention policy limits. If set, and the state of a room contains a
+ # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
+ # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
+ # to these limits when running purge jobs.
+ #
+ #allowed_lifetime_min: 1d
+ #allowed_lifetime_max: 1y
+
+ # Server admins can define the settings of the background jobs purging the
+ # events whose lifetime has expired under the 'purge_jobs' section.
+ #
+ # If no configuration is provided, a single job will be set up to delete expired
+ # events in every room daily.
+ #
+ # Each job's configuration defines which range of message lifetimes the job
+ # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
+ # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
+ # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
+ # lower than or equal to 3 days. Both the minimum and the maximum value of a
+ # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
+ # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
+ # whose 'max_lifetime' is lower than or equal to three days.
+ #
+ # The rationale for this per-job configuration is that some rooms might have a
+ # retention policy with a low 'max_lifetime', where history needs to be purged
+ # of outdated messages on a more frequent basis than for the rest of the rooms
+ # (e.g. every 12h), but not want that purge to be performed by a job that's
+ # iterating over every room it knows, which could be heavy on the server.
+ #
+ # If any purge job is configured, it is strongly recommended to have at least
+ # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
+ # set, or one job without 'shortest_max_lifetime' and one job without
+ # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
+ # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
+ # room's policy to these values is done after the policies are retrieved from
+ # Synapse's database (which is done using the range specified in a purge job's
+ # configuration).
+ #
+ #purge_jobs:
+ # - longest_max_lifetime: 3d
+ # interval: 12h
+ # - shortest_max_lifetime: 3d
+ # interval: 1d
+ """
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 818b806357..ed094bdc44 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -225,15 +225,6 @@ class ManholeConfig:
pub_key: Optional[Key]
-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class RetentionConfig:
- """Object describing the configuration of the manhole"""
-
- interval: int
- shortest_max_lifetime: Optional[int]
- longest_max_lifetime: Optional[int]
-
-
@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@@ -376,11 +367,6 @@ class ServerConfig(Config):
# (other than those sent by local server admins)
self.block_non_admin_invites = config.get("block_non_admin_invites", False)
- # Whether to enable experimental MSC1849 (aka relations) support
- self.experimental_msc1849_support_enabled = config.get(
- "experimental_msc1849_support_enabled", True
- )
-
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
@@ -466,124 +452,6 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
- retention_config = config.get("retention")
- if retention_config is None:
- retention_config = {}
-
- self.retention_enabled = retention_config.get("enabled", False)
-
- retention_default_policy = retention_config.get("default_policy")
-
- if retention_default_policy is not None:
- self.retention_default_min_lifetime = retention_default_policy.get(
- "min_lifetime"
- )
- if self.retention_default_min_lifetime is not None:
- self.retention_default_min_lifetime = self.parse_duration(
- self.retention_default_min_lifetime
- )
-
- self.retention_default_max_lifetime = retention_default_policy.get(
- "max_lifetime"
- )
- if self.retention_default_max_lifetime is not None:
- self.retention_default_max_lifetime = self.parse_duration(
- self.retention_default_max_lifetime
- )
-
- if (
- self.retention_default_min_lifetime is not None
- and self.retention_default_max_lifetime is not None
- and (
- self.retention_default_min_lifetime
- > self.retention_default_max_lifetime
- )
- ):
- raise ConfigError(
- "The default retention policy's 'min_lifetime' can not be greater"
- " than its 'max_lifetime'"
- )
- else:
- self.retention_default_min_lifetime = None
- self.retention_default_max_lifetime = None
-
- if self.retention_enabled:
- logger.info(
- "Message retention policies support enabled with the following default"
- " policy: min_lifetime = %s ; max_lifetime = %s",
- self.retention_default_min_lifetime,
- self.retention_default_max_lifetime,
- )
-
- self.retention_allowed_lifetime_min = retention_config.get(
- "allowed_lifetime_min"
- )
- if self.retention_allowed_lifetime_min is not None:
- self.retention_allowed_lifetime_min = self.parse_duration(
- self.retention_allowed_lifetime_min
- )
-
- self.retention_allowed_lifetime_max = retention_config.get(
- "allowed_lifetime_max"
- )
- if self.retention_allowed_lifetime_max is not None:
- self.retention_allowed_lifetime_max = self.parse_duration(
- self.retention_allowed_lifetime_max
- )
-
- if (
- self.retention_allowed_lifetime_min is not None
- and self.retention_allowed_lifetime_max is not None
- and self.retention_allowed_lifetime_min
- > self.retention_allowed_lifetime_max
- ):
- raise ConfigError(
- "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
- " greater than 'allowed_lifetime_max'"
- )
-
- self.retention_purge_jobs: List[RetentionConfig] = []
- for purge_job_config in retention_config.get("purge_jobs", []):
- interval_config = purge_job_config.get("interval")
-
- if interval_config is None:
- raise ConfigError(
- "A retention policy's purge jobs configuration must have the"
- " 'interval' key set."
- )
-
- interval = self.parse_duration(interval_config)
-
- shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
-
- if shortest_max_lifetime is not None:
- shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
-
- longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
-
- if longest_max_lifetime is not None:
- longest_max_lifetime = self.parse_duration(longest_max_lifetime)
-
- if (
- shortest_max_lifetime is not None
- and longest_max_lifetime is not None
- and shortest_max_lifetime > longest_max_lifetime
- ):
- raise ConfigError(
- "A retention policy's purge jobs configuration's"
- " 'shortest_max_lifetime' value can not be greater than its"
- " 'longest_max_lifetime' value."
- )
-
- self.retention_purge_jobs.append(
- RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
- )
-
- if not self.retention_purge_jobs:
- self.retention_purge_jobs = [
- RetentionConfig(self.parse_duration("1d"), None, None)
- ]
-
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
# no_tls is not really supported any more, but let's grandfather it in
@@ -1255,75 +1123,6 @@ class ServerConfig(Config):
#
#user_ips_max_age: 14d
- # Message retention policy at the server level.
- #
- # Room admins and mods can define a retention period for their rooms using the
- # 'm.room.retention' state event, and server admins can cap this period by setting
- # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
- #
- # If this feature is enabled, Synapse will regularly look for and purge events
- # which are older than the room's maximum retention period. Synapse will also
- # filter events received over federation so that events that should have been
- # purged are ignored and not stored again.
- #
- retention:
- # The message retention policies feature is disabled by default. Uncomment the
- # following line to enable it.
- #
- #enabled: true
-
- # Default retention policy. If set, Synapse will apply it to rooms that lack the
- # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
- # matter much because Synapse doesn't take it into account yet.
- #
- #default_policy:
- # min_lifetime: 1d
- # max_lifetime: 1y
-
- # Retention policy limits. If set, and the state of a room contains a
- # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
- # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
- # to these limits when running purge jobs.
- #
- #allowed_lifetime_min: 1d
- #allowed_lifetime_max: 1y
-
- # Server admins can define the settings of the background jobs purging the
- # events which lifetime has expired under the 'purge_jobs' section.
- #
- # If no configuration is provided, a single job will be set up to delete expired
- # events in every room daily.
- #
- # Each job's configuration defines which range of message lifetimes the job
- # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
- # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
- # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
- # lower than or equal to 3 days. Both the minimum and the maximum value of a
- # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
- # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
- # which 'max_lifetime' is lower than or equal to three days.
- #
- # The rationale for this per-job configuration is that some rooms might have a
- # retention policy with a low 'max_lifetime', where history needs to be purged
- # of outdated messages on a more frequent basis than for the rest of the rooms
- # (e.g. every 12h), but not want that purge to be performed by a job that's
- # iterating over every room it knows, which could be heavy on the server.
- #
- # If any purge job is configured, it is strongly recommended to have at least
- # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
- # set, or one job without 'shortest_max_lifetime' and one job without
- # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
- # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
- # room's policy to these values is done after the policies are retrieved from
- # Synapse's database (which is done using the range specified in a purge job's
- # configuration).
- #
- #purge_jobs:
- # - longest_max_lifetime: 3d
- # interval: 12h
- # - shortest_max_lifetime: 3d
- # interval: 1d
-
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index ca0293a3dc..e885961698 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -14,7 +14,7 @@
# limitations under the License.
import logging
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
+from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -113,7 +113,7 @@ def validate_event_for_room_version(
def check_auth_rules_for_event(
- room_version_obj: RoomVersion, event: EventBase, auth_events: StateMap[EventBase]
+ room_version_obj: RoomVersion, event: EventBase, auth_events: Iterable[EventBase]
) -> None:
"""Check that an event complies with the auth rules
@@ -137,8 +137,6 @@ def check_auth_rules_for_event(
Raises:
AuthError if the checks fail
"""
- assert isinstance(auth_events, dict)
-
# We need to ensure that the auth events are actually for the same room, to
# stop people from using powers they've been granted in other rooms for
# example.
@@ -147,7 +145,7 @@ def check_auth_rules_for_event(
# the state res algorithm isn't silly enough to give us events from different rooms.
# Still, it's easier to do it anyway.
room_id = event.room_id
- for auth_event in auth_events.values():
+ for auth_event in auth_events:
if auth_event.room_id != room_id:
raise AuthError(
403,
@@ -186,8 +184,10 @@ def check_auth_rules_for_event(
logger.debug("Allowing! %s", event)
return
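+ # Index the auth events by (type, state_key) so the checks below can look
+ # up specific events; callers now pass a plain iterable.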
+ auth_dict = {(e.type, e.state_key): e for e in auth_events}
+
# 3. If event does not have a m.room.create in its auth_events, reject.
- creation_event = auth_events.get((EventTypes.Create, ""), None)
+ creation_event = auth_dict.get((EventTypes.Create, ""), None)
if not creation_event:
raise AuthError(403, "No create event in auth events")
@@ -195,7 +195,7 @@ def check_auth_rules_for_event(
creating_domain = get_domain_from_id(event.room_id)
originating_domain = get_domain_from_id(event.sender)
if creating_domain != originating_domain:
- if not _can_federate(event, auth_events):
+ if not _can_federate(event, auth_dict):
raise AuthError(403, "This room has been marked as unfederatable.")
# 4. If type is m.room.aliases
@@ -217,23 +217,20 @@ def check_auth_rules_for_event(
logger.debug("Allowing! %s", event)
return
- if logger.isEnabledFor(logging.DEBUG):
- logger.debug("Auth events: %s", [a.event_id for a in auth_events.values()])
-
# 5. If type is m.room.membership
if event.type == EventTypes.Member:
- _is_membership_change_allowed(room_version_obj, event, auth_events)
+ _is_membership_change_allowed(room_version_obj, event, auth_dict)
logger.debug("Allowing! %s", event)
return
- _check_event_sender_in_room(event, auth_events)
+ _check_event_sender_in_room(event, auth_dict)
# Special case to allow m.room.third_party_invite events wherever
# a user is allowed to issue invites. Fixes
# https://github.com/vector-im/vector-web/issues/1208 hopefully
if event.type == EventTypes.ThirdPartyInvite:
- user_level = get_user_power_level(event.user_id, auth_events)
- invite_level = get_named_level(auth_events, "invite", 0)
+ user_level = get_user_power_level(event.user_id, auth_dict)
+ invite_level = get_named_level(auth_dict, "invite", 0)
if user_level < invite_level:
raise AuthError(403, "You don't have permission to invite users")
@@ -241,20 +238,20 @@ def check_auth_rules_for_event(
logger.debug("Allowing! %s", event)
return
- _can_send_event(event, auth_events)
+ _can_send_event(event, auth_dict)
if event.type == EventTypes.PowerLevels:
- _check_power_levels(room_version_obj, event, auth_events)
+ _check_power_levels(room_version_obj, event, auth_dict)
if event.type == EventTypes.Redaction:
- check_redaction(room_version_obj, event, auth_events)
+ check_redaction(room_version_obj, event, auth_dict)
if (
event.type == EventTypes.MSC2716_INSERTION
or event.type == EventTypes.MSC2716_BATCH
or event.type == EventTypes.MSC2716_MARKER
):
- check_historical(room_version_obj, event, auth_events)
+ check_historical(room_version_obj, event, auth_dict)
logger.debug("Allowing! %s", event)
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index 49190459c8..157669ea88 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -348,12 +348,16 @@ class EventBase(metaclass=abc.ABCMeta):
return self.__repr__()
def __repr__(self):
- return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
- self.__class__.__name__,
- self.event_id,
- self.get("type", None),
- self.get("state_key", None),
- self.internal_metadata.is_outlier(),
+ rejection = f"REJECTED={self.rejected_reason}, " if self.rejected_reason else ""
+
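+ # e.g. "<FrozenEvent event_id=$abc:example.org, type=m.room.message, state_key=None, outlier=False>"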
+ return (
+ f"<{self.__class__.__name__} "
+ f"{rejection}"
+ f"event_id={self.event_id}, "
+ f"type={self.get('type')}, "
+ f"state_key={self.get('state_key')}, "
+ f"outlier={self.internal_metadata.is_outlier()}"
+ ">"
)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 23bd24d963..3f3eba86a8 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -385,9 +385,7 @@ class EventClientSerializer:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
- self.experimental_msc1849_support_enabled = (
- hs.config.server.experimental_msc1849_support_enabled
- )
+ self._msc1849_enabled = hs.config.experimental.msc1849_enabled
async def serialize_event(
self,
@@ -418,7 +416,7 @@ class EventClientSerializer:
# we need to bundle in with the event.
# Do not bundle relations if the event has been redacted
if not event.internal_metadata.is_redacted() and (
- self.experimental_msc1849_support_enabled and bundle_aggregations
+ self._msc1849_enabled and bundle_aggregations
):
annotations = await self.store.get_aggregation_groups_for_event(event_id)
references = await self.store.get_relations_for_event(
diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py
index d089c56286..365063ebdf 100644
--- a/synapse/handlers/event_auth.py
+++ b/synapse/handlers/event_auth.py
@@ -55,8 +55,7 @@ class EventAuthHandler:
"""Check an event passes the auth rules at its own auth events"""
auth_event_ids = event.auth_event_ids()
auth_events_by_id = await self._store.get_events(auth_event_ids)
- auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}
- check_auth_rules_for_event(room_version_obj, event, auth_events)
+ check_auth_rules_for_event(room_version_obj, event, auth_events_by_id.values())
def compute_auth_events(
self,
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 2d60ea6d80..c478e0bc5c 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -15,7 +15,6 @@
"""Contains handlers for federation events."""
-import itertools
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
@@ -27,12 +26,7 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import event_auth
-from synapse.api.constants import (
- EventContentFields,
- EventTypes,
- Membership,
- RejectedReason,
-)
+from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
CodeMessageException,
@@ -43,12 +37,9 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.crypto.event_signing import compute_event_signature
-from synapse.event_auth import (
- check_auth_rules_for_event,
- validate_event_for_room_version,
-)
+from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.events.validator import EventValidator
@@ -525,7 +516,7 @@ class FederationHandler:
auth_events=auth_chain,
)
- max_stream_id = await self._persist_auth_tree(
+ max_stream_id = await self._federation_event_handler.process_remote_join(
origin, room_id, auth_chain, state, event, room_version_obj
)
@@ -1101,119 +1092,6 @@ class FederationHandler:
else:
return None
- async def _persist_auth_tree(
- self,
- origin: str,
- room_id: str,
- auth_events: List[EventBase],
- state: List[EventBase],
- event: EventBase,
- room_version: RoomVersion,
- ) -> int:
- """Checks the auth chain is valid (and passes auth checks) for the
- state and event. Then persists the auth chain and state atomically.
- Persists the event separately. Notifies about the persisted events
- where appropriate.
-
- Will attempt to fetch missing auth events.
-
- Args:
- origin: Where the events came from
- room_id,
- auth_events
- state
- event
- room_version: The room version we expect this room to have, and
- will raise if it doesn't match the version in the create event.
- """
- events_to_context = {}
- for e in itertools.chain(auth_events, state):
- e.internal_metadata.outlier = True
- events_to_context[e.event_id] = EventContext.for_outlier()
-
- event_map = {
- e.event_id: e for e in itertools.chain(auth_events, state, [event])
- }
-
- create_event = None
- for e in auth_events:
- if (e.type, e.state_key) == (EventTypes.Create, ""):
- create_event = e
- break
-
- if create_event is None:
- # If the state doesn't have a create event then the room is
- # invalid, and it would fail auth checks anyway.
- raise SynapseError(400, "No create event in state")
-
- room_version_id = create_event.content.get(
- "room_version", RoomVersions.V1.identifier
- )
-
- if room_version.identifier != room_version_id:
- raise SynapseError(400, "Room version mismatch")
-
- missing_auth_events = set()
- for e in itertools.chain(auth_events, state, [event]):
- for e_id in e.auth_event_ids():
- if e_id not in event_map:
- missing_auth_events.add(e_id)
-
- for e_id in missing_auth_events:
- m_ev = await self.federation_client.get_pdu(
- [origin],
- e_id,
- room_version=room_version,
- outlier=True,
- timeout=10000,
- )
- if m_ev and m_ev.event_id == e_id:
- event_map[e_id] = m_ev
- else:
- logger.info("Failed to find auth event %r", e_id)
-
- for e in itertools.chain(auth_events, state, [event]):
- auth_for_e = {
- (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
- for e_id in e.auth_event_ids()
- if e_id in event_map
- }
- if create_event:
- auth_for_e[(EventTypes.Create, "")] = create_event
-
- try:
- validate_event_for_room_version(room_version, e)
- check_auth_rules_for_event(room_version, e, auth_for_e)
- except SynapseError as err:
- # we may get SynapseErrors here as well as AuthErrors. For
- # instance, there are a couple of (ancient) events in some
- # rooms whose senders do not have the correct sigil; these
- # cause SynapseErrors in auth.check. We don't want to give up
- # the attempt to federate altogether in such cases.
-
- logger.warning("Rejecting %s because %s", e.event_id, err.msg)
-
- if e == event:
- raise
- events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
-
- if auth_events or state:
- await self._federation_event_handler.persist_events_and_notify(
- room_id,
- [
- (e, events_to_context[e.event_id])
- for e in itertools.chain(auth_events, state)
- ],
- )
-
- new_event_context = await self.state_handler.compute_event_context(
- event, old_state=state
- )
-
- return await self._federation_event_handler.persist_events_and_notify(
- room_id, [(event, new_event_context)]
- )
-
async def on_get_missing_events(
self,
origin: str,
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 5edcb91403..610a4e48c5 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import itertools
import logging
from http import HTTPStatus
from typing import (
@@ -45,7 +46,7 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions
from synapse.event_auth import (
auth_types_for_event,
check_auth_rules_for_event,
@@ -64,7 +65,6 @@ from synapse.replication.http.federation import (
from synapse.state import StateResolutionStore
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.types import (
- MutableStateMap,
PersistedEventPosition,
RoomStreamToken,
StateMap,
@@ -214,7 +214,7 @@ class FederationEventHandler:
if missing_prevs:
# We only backfill backwards to the min depth.
- min_depth = await self.get_min_depth_for_context(pdu.room_id)
+ min_depth = await self._store.get_min_depth(pdu.room_id)
logger.debug("min_depth: %d", min_depth)
if min_depth is not None and pdu.depth > min_depth:
@@ -390,6 +390,119 @@ class FederationEventHandler:
prev_member_event,
)
+ async def process_remote_join(
+ self,
+ origin: str,
+ room_id: str,
+ auth_events: List[EventBase],
+ state: List[EventBase],
+ event: EventBase,
+ room_version: RoomVersion,
+ ) -> int:
+ """Persists the events returned by a send_join
+
+ Checks the auth chain is valid (and passes auth checks) for the
+ state and event. Then persists the auth chain and state atomically.
+ Persists the event separately. Notifies about the persisted events
+ where appropriate.
+
+ Will attempt to fetch missing auth events.
+
+ Args:
+ origin: Where the events came from
+ room_id: The room ID of the join.
+ auth_events: The auth chain for the join, as returned by the remote server.
+ state: The room state at the join, as returned by the remote server.
+ event: The join event itself.
+ room_version: The room version we expect this room to have, and
+ will raise if it doesn't match the version in the create event.
+ """
+ events_to_context = {}
+ for e in itertools.chain(auth_events, state):
+ e.internal_metadata.outlier = True
+ events_to_context[e.event_id] = EventContext.for_outlier()
+
+ event_map = {
+ e.event_id: e for e in itertools.chain(auth_events, state, [event])
+ }
+
+ create_event = None
+ for e in auth_events:
+ if (e.type, e.state_key) == (EventTypes.Create, ""):
+ create_event = e
+ break
+
+ if create_event is None:
+ # If the state doesn't have a create event then the room is
+ # invalid, and it would fail auth checks anyway.
+ raise SynapseError(400, "No create event in state")
+
+ room_version_id = create_event.content.get(
+ "room_version", RoomVersions.V1.identifier
+ )
+
+ if room_version.identifier != room_version_id:
+ raise SynapseError(400, "Room version mismatch")
+
+ missing_auth_events = set()
+ for e in itertools.chain(auth_events, state, [event]):
+ for e_id in e.auth_event_ids():
+ if e_id not in event_map:
+ missing_auth_events.add(e_id)
+
+ for e_id in missing_auth_events:
+ m_ev = await self._federation_client.get_pdu(
+ [origin],
+ e_id,
+ room_version=room_version,
+ outlier=True,
+ timeout=10000,
+ )
+ if m_ev and m_ev.event_id == e_id:
+ event_map[e_id] = m_ev
+ else:
+ logger.info("Failed to find auth event %r", e_id)
+
+ for e in itertools.chain(auth_events, state, [event]):
+ auth_for_e = [
+ event_map[e_id] for e_id in e.auth_event_ids() if e_id in event_map
+ ]
+ if create_event:
+ auth_for_e.append(create_event)
+
+ try:
+ validate_event_for_room_version(room_version, e)
+ check_auth_rules_for_event(room_version, e, auth_for_e)
+ except SynapseError as err:
+ # we may get SynapseErrors here as well as AuthErrors. For
+ # instance, there are a couple of (ancient) events in some
+ # rooms whose senders do not have the correct sigil; these
+ # cause SynapseErrors in auth.check. We don't want to give up
+ # the attempt to federate altogether in such cases.
+
+ logger.warning("Rejecting %s because %s", e.event_id, err.msg)
+
+ if e == event:
+ raise
+ events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR
+
+ if auth_events or state:
+ await self.persist_events_and_notify(
+ room_id,
+ [
+ (e, events_to_context[e.event_id])
+ for e in itertools.chain(auth_events, state)
+ ],
+ )
+
+ new_event_context = await self._state_handler.compute_event_context(
+ event, old_state=state
+ )
+
+ return await self.persist_events_and_notify(
+ room_id, [(event, new_event_context)]
+ )
+
@log_function
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Iterable[str]
@@ -1120,14 +1233,12 @@ class FederationEventHandler:
await concurrently_execute(get_event, event_ids, 5)
logger.info("Fetched %i events of %i requested", len(events), len(event_ids))
- await self._auth_and_persist_fetched_events(destination, room_id, events)
+ await self._auth_and_persist_outliers(room_id, events)
- async def _auth_and_persist_fetched_events(
- self, origin: str, room_id: str, events: Iterable[EventBase]
+ async def _auth_and_persist_outliers(
+ self, room_id: str, events: Iterable[EventBase]
) -> None:
- """Persist the events fetched by _get_events_and_persist or _get_remote_auth_chain_for_event
-
- The events to be persisted must be outliers.
+ """Persist a batch of outlier events fetched from remote servers.
We first sort the events to make sure that we process each event's auth_events
before the event itself, and then auth and persist them.
@@ -1135,7 +1246,6 @@ class FederationEventHandler:
Notifies about the events where appropriate.
Params:
- origin: where the events came from
room_id: the room that the events are meant to be in (though this has
not yet been checked)
events: the events that have been fetched
@@ -1171,15 +1281,15 @@ class FederationEventHandler:
shortstr(e.event_id for e in roots),
)
- await self._auth_and_persist_fetched_events_inner(origin, room_id, roots)
+ await self._auth_and_persist_outliers_inner(room_id, roots)
for ev in roots:
del event_map[ev.event_id]
- async def _auth_and_persist_fetched_events_inner(
- self, origin: str, room_id: str, fetched_events: Collection[EventBase]
+ async def _auth_and_persist_outliers_inner(
+ self, room_id: str, fetched_events: Collection[EventBase]
) -> None:
- """Helper for _auth_and_persist_fetched_events
+ """Helper for _auth_and_persist_outliers
Persists a batch of events where we have (theoretically) already persisted all
of their auth events.
@@ -1207,7 +1317,7 @@ class FederationEventHandler:
def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
with nested_logging_context(suffix=event.event_id):
- auth = {}
+ auth = []
for auth_event_id in event.auth_event_ids():
ae = persisted_events.get(auth_event_id)
if not ae:
@@ -1220,7 +1330,7 @@ class FederationEventHandler:
# exist, which means it is premature to reject `event`. Instead we
# just ignore it for now.
return None
- auth[(ae.type, ae.state_key)] = ae
+ auth.append(ae)
context = EventContext.for_outlier()
try:
@@ -1265,6 +1375,10 @@ class FederationEventHandler:
Returns:
The updated context object.
+
+ Raises:
+ AuthError if we were unable to find copies of the event's auth events.
+ (Most other failures just cause us to set `context.rejected`.)
"""
# This method should only be used for non-outliers
assert not event.internal_metadata.outlier
@@ -1281,7 +1395,26 @@ class FederationEventHandler:
context.rejected = RejectedReason.AUTH_ERROR
return context
- # calculate what the auth events *should* be, to use as a basis for auth.
+ # next, check that we have all of the event's auth events.
+ #
+ # Note that this can raise AuthError, which we want to propagate to the
+ # caller rather than swallow with `context.rejected` (since we cannot be
+ # certain that there is a permanent problem with the event).
+ claimed_auth_events = await self._load_or_fetch_auth_events_for_event(
+ origin, event
+ )
+
+ # ... and check that the event passes auth at those auth events.
+ try:
+ check_auth_rules_for_event(room_version_obj, event, claimed_auth_events)
+ except AuthError as e:
+ logger.warning(
+ "While checking auth of %r against auth_events: %s", event, e
+ )
+ context.rejected = RejectedReason.AUTH_ERROR
+ return context
+
+ # now check auth against what we think the auth events *should* be.
prev_state_ids = await context.get_prev_state_ids()
auth_events_ids = self._event_auth_handler.compute_auth_events(
event, prev_state_ids, for_verification=True
@@ -1292,13 +1425,8 @@ class FederationEventHandler:
}
try:
- (
- context,
- auth_events_for_auth,
- ) = await self._update_auth_events_and_context_for_auth(
- origin,
+ updated_auth_events = await self._update_auth_events_for_auth(
event,
- context,
calculated_auth_event_map=calculated_auth_event_map,
)
except Exception:
@@ -1311,10 +1439,20 @@ class FederationEventHandler:
"Ignoring failure and continuing processing of event.",
event.event_id,
)
+ updated_auth_events = None
+
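+ # If state resolution produced an updated auth event map, recompute the
+ # event context against it; otherwise fall back to our calculated auth events.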
+ if updated_auth_events:
+ context = await self._update_context_for_auth_events(
+ event, context, updated_auth_events
+ )
+ auth_events_for_auth = updated_auth_events
+ else:
auth_events_for_auth = calculated_auth_event_map
try:
- check_auth_rules_for_event(room_version_obj, event, auth_events_for_auth)
+ check_auth_rules_for_event(
+ room_version_obj, event, auth_events_for_auth.values()
+ )
except AuthError as e:
logger.warning("Failed auth resolution for %r because %s", event, e)
context.rejected = RejectedReason.AUTH_ERROR
@@ -1412,11 +1550,9 @@ class FederationEventHandler:
current_state_ids_list = [
e for k, e in current_state_ids.items() if k in auth_types
]
-
- auth_events_map = await self._store.get_events(current_state_ids_list)
- current_auth_events = {
- (e.type, e.state_key): e for e in auth_events_map.values()
- }
+ current_auth_events = await self._store.get_events_as_list(
+ current_state_ids_list
+ )
try:
check_auth_rules_for_event(room_version_obj, event, current_auth_events)
@@ -1435,13 +1571,11 @@ class FederationEventHandler:
soft_failed_event_counter.inc()
event.internal_metadata.soft_failed = True
- async def _update_auth_events_and_context_for_auth(
+ async def _update_auth_events_for_auth(
self,
- origin: str,
event: EventBase,
- context: EventContext,
calculated_auth_event_map: StateMap[EventBase],
- ) -> Tuple[EventContext, StateMap[EventBase]]:
+ ) -> Optional[StateMap[EventBase]]:
"""Helper for _check_event_auth. See there for docs.
Checks whether a given event has the expected auth events. If it
@@ -1454,93 +1588,27 @@ class FederationEventHandler:
processing of the event.
Args:
- origin:
event:
- context:
calculated_auth_event_map:
Our calculated auth_events based on the state of the room
at the event's position in the DAG.
Returns:
- updated context, updated auth event map
+ updated auth event map, or None if no changes are needed.
+
"""
assert not event.internal_metadata.outlier
- # take a copy of calculated_auth_event_map before we modify it.
- auth_events: MutableStateMap[EventBase] = dict(calculated_auth_event_map)
-
+ # check for events which are in the event's claimed auth_events, but not
+ # in our calculated event map.
event_auth_events = set(event.auth_event_ids())
-
- # missing_auth is the set of the event's auth_events which we don't yet have
- # in auth_events.
- missing_auth = event_auth_events.difference(
- e.event_id for e in auth_events.values()
- )
-
- # if we have missing events, we need to fetch those events from somewhere.
- #
- # we start by checking if they are in the store, and then try calling /event_auth/.
- if missing_auth:
- have_events = await self._store.have_seen_events(
- event.room_id, missing_auth
- )
- logger.debug("Events %s are in the store", have_events)
- missing_auth.difference_update(have_events)
-
- # missing_auth is now the set of event_ids which:
- # a. are listed in event.auth_events, *and*
- # b. are *not* part of our calculated auth events based on room state, *and*
- # c. are *not* yet in our database.
-
- if missing_auth:
- # If we don't have all the auth events, we need to get them.
- logger.info("auth_events contains unknown events: %s", missing_auth)
- try:
- await self._get_remote_auth_chain_for_event(
- origin, event.room_id, event.event_id
- )
- except Exception:
- logger.exception("Failed to get auth chain")
- else:
- # load any auth events we might have persisted from the database. This
- # has the side-effect of correctly setting the rejected_reason on them.
- auth_events.update(
- {
- (ae.type, ae.state_key): ae
- for ae in await self._store.get_events_as_list(
- missing_auth, allow_rejected=True
- )
- }
- )
-
- # auth_events now contains
- # 1. our *calculated* auth events based on the room state, plus:
- # 2. any events which:
- # a. are listed in `event.auth_events`, *and*
- # b. are not part of our calculated auth events, *and*
- # c. were not in our database before the call to /event_auth
- # d. have since been added to our database (most likely by /event_auth).
-
different_auth = event_auth_events.difference(
- e.event_id for e in auth_events.values()
+ e.event_id for e in calculated_auth_event_map.values()
)
- # different_auth is the set of events which *are* in `event.auth_events`, but
- # which are *not* in `auth_events`. Comparing with (2.) above, this means
- # exclusively the set of `event.auth_events` which we already had in our
- # database before any call to /event_auth.
- #
- # I'm reasonably sure that the fact that events returned by /event_auth are
- # blindly added to auth_events (and hence excluded from different_auth) is a bug
- # - though it's a very long-standing one (see
- # https://github.com/matrix-org/synapse/commit/78015948a7febb18e000651f72f8f58830a55b93#diff-0bc92da3d703202f5b9be2d3f845e375f5b1a6bc6ba61705a8af9be1121f5e42R786
- # from Jan 2015 which seems to add it, though it actually just moves it from
- # elsewhere (before that, it gets lost in a mess of huge "various bug fixes"
- # PRs).
-
if not different_auth:
- return context, auth_events
+ return None
logger.info(
"auth_events refers to events which are not in our calculated auth "
@@ -1552,27 +1620,18 @@ class FederationEventHandler:
# necessary?
different_events = await self._store.get_events_as_list(different_auth)
+ # double-check they're all in the same room - we should already have checked
+ # this but it doesn't hurt to check again.
for d in different_events:
- if d.room_id != event.room_id:
- logger.warning(
- "Event %s refers to auth_event %s which is in a different room",
- event.event_id,
- d.event_id,
- )
-
- # don't attempt to resolve the claimed auth events against our own
- # in this case: just use our own auth events.
- #
- # XXX: should we reject the event in this case? It feels like we should,
- # but then shouldn't we also do so if we've failed to fetch any of the
- # auth events?
- return context, auth_events
+ assert (
+ d.room_id == event.room_id
+ ), f"Event {event.event_id} refers to auth_event {d.event_id} which is in a different room"
# now we state-resolve between our own idea of the auth events, and the remote's
# idea of them.
- local_state = auth_events.values()
- remote_auth_events = dict(auth_events)
+ local_state = calculated_auth_event_map.values()
+ remote_auth_events = dict(calculated_auth_event_map)
remote_auth_events.update({(d.type, d.state_key): d for d in different_events})
remote_state = remote_auth_events.values()
@@ -1580,23 +1639,93 @@ class FederationEventHandler:
new_state = await self._state_handler.resolve_events(
room_version, (local_state, remote_state), event
)
+ different_state = {
+ (d.type, d.state_key): d
+ for d in new_state.values()
+ if calculated_auth_event_map.get((d.type, d.state_key)) != d
+ }
+ if not different_state:
+ logger.info("State res returned no new state")
+ return None
logger.info(
"After state res: updating auth_events with new state %s",
- {
- (d.type, d.state_key): d.event_id
- for d in new_state.values()
- if auth_events.get((d.type, d.state_key)) != d
- },
+ different_state.values(),
)
- auth_events.update(new_state)
+ # take a copy of calculated_auth_event_map before we modify it.
+ auth_events = dict(calculated_auth_event_map)
+ auth_events.update(different_state)
+ return auth_events
+
+ async def _load_or_fetch_auth_events_for_event(
+ self, destination: str, event: EventBase
+ ) -> Collection[EventBase]:
+ """Fetch this event's auth_events, from database or remote
+
+ Loads any of the auth_events that we already have from the database/cache. If
+ there are any that are missing, calls /event_auth to get the complete auth
+ chain for the event (and then attempts to load the auth_events again).
+
+ If any of the auth_events cannot be found, raises an AuthError. This can happen
+ for a number of reasons; e.g. the events don't exist, or we were unable to talk
+ to `destination`, or we couldn't validate the signature on the event (which
+ in turn has multiple potential causes).
+
+ Args:
+ destination: where to send the /event_auth request. Typically the server
+ that sent us `event` in the first place.
+ event: the event whose auth_events we want
+
+ Returns:
+ all of the events in `event.auth_events`, after deduplication
- context = await self._update_context_for_auth_events(
- event, context, auth_events
+ Raises:
+ AuthError if we were unable to fetch the auth_events for any reason.
+ """
+ event_auth_event_ids = set(event.auth_event_ids())
+ event_auth_events = await self._store.get_events(
+ event_auth_event_ids, allow_rejected=True
+ )
+ missing_auth_event_ids = event_auth_event_ids.difference(
+ event_auth_events.keys()
)
+ if not missing_auth_event_ids:
+ return event_auth_events.values()
- return context, auth_events
+ logger.info(
+ "Event %s refers to unknown auth events %s: fetching auth chain",
+ event,
+ missing_auth_event_ids,
+ )
+ try:
+ await self._get_remote_auth_chain_for_event(
+ destination, event.room_id, event.event_id
+ )
+ except Exception as e:
+ logger.warning("Failed to get auth chain for %s: %s", event, e)
+ # in this case, it's very likely we still won't have all the auth
+ # events - but we pick that up below.
+
+ # try to fetch the auth events we missed last time.
+ extra_auth_events = await self._store.get_events(
+ missing_auth_event_ids, allow_rejected=True
+ )
+ missing_auth_event_ids.difference_update(extra_auth_events.keys())
+ event_auth_events.update(extra_auth_events)
+ if not missing_auth_event_ids:
+ return event_auth_events.values()
+
+ # we still don't have all the auth events.
+ logger.warning(
+ "Missing auth events for %s: %s",
+ event,
+ shortstr(missing_auth_event_ids),
+ )
+ # the fact we can't find the auth event doesn't mean it doesn't
+ # exist, which means it is premature to store `event` as rejected.
+ # instead we raise an AuthError, which will make the caller ignore it.
+ raise AuthError(code=HTTPStatus.FORBIDDEN, msg="Auth events could not be found")
async def _get_remote_auth_chain_for_event(
self, destination: str, room_id: str, event_id: str
@@ -1633,9 +1762,7 @@ class FederationEventHandler:
for s in seen_remotes:
remote_event_map.pop(s, None)
- await self._auth_and_persist_fetched_events(
- destination, room_id, remote_event_map.values()
- )
+ await self._auth_and_persist_outliers(room_id, remote_event_map.values())
async def _update_context_for_auth_events(
self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase]
@@ -1705,16 +1832,27 @@ class FederationEventHandler:
# persist_events_and_notify directly.)
assert not event.internal_metadata.outlier
- try:
- if (
- not backfilled
- and not context.rejected
- and (await self._store.get_min_depth(event.room_id)) <= event.depth
- ):
+ if not backfilled and not context.rejected:
+ min_depth = await self._store.get_min_depth(event.room_id)
+ if min_depth is None or min_depth > event.depth:
+ # XXX richvdh 2021/10/07: I don't really understand what this
+ # condition is doing. I think it's trying not to send pushes
+ # for events that predate our join - but that's not really what
+ # min_depth means, and anyway ancient events are a more general
+ # problem.
+ #
+ # for now I'm just going to log about it.
+ logger.info(
+ "Skipping push actions for old event with depth %s < %s",
+ event.depth,
+ min_depth,
+ )
+ else:
await self._action_generator.handle_push_actions_for_event(
event, context
)
+ try:
await self.persist_events_and_notify(
event.room_id, [(event, context)], backfilled=backfilled
)
@@ -1846,6 +1984,3 @@ class FederationEventHandler:
len(ev.auth_event_ids()),
)
raise SynapseError(HTTPStatus.BAD_REQUEST, "Too many auth_events")
-
- async def get_min_depth_for_context(self, context: str) -> int:
- return await self._store.get_min_depth(context)
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 9c319b5383..7ef8698a5e 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -54,7 +54,9 @@ class IdentityHandler:
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
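+ # Respect the federation IP whitelist as well as the blacklist, so that
+ # identity servers on otherwise-blacklisted private ranges can still be reached.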
self.blacklisting_http_client = SimpleHttpClient(
- hs, ip_blacklist=hs.config.server.federation_ip_range_blacklist
+ hs,
+ ip_blacklist=hs.config.server.federation_ip_range_blacklist,
+ ip_whitelist=hs.config.server.federation_ip_range_whitelist,
)
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 176e4dfdd4..60ff896386 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -86,19 +86,22 @@ class PaginationHandler:
self._event_serializer = hs.get_event_client_serializer()
self._retention_default_max_lifetime = (
- hs.config.server.retention_default_max_lifetime
+ hs.config.retention.retention_default_max_lifetime
)
self._retention_allowed_lifetime_min = (
- hs.config.server.retention_allowed_lifetime_min
+ hs.config.retention.retention_allowed_lifetime_min
)
self._retention_allowed_lifetime_max = (
- hs.config.server.retention_allowed_lifetime_max
+ hs.config.retention.retention_allowed_lifetime_max
)
- if hs.config.worker.run_background_tasks and hs.config.server.retention_enabled:
+ if (
+ hs.config.worker.run_background_tasks
+ and hs.config.retention.retention_enabled
+ ):
# Run the purge jobs described in the configuration file.
- for job in hs.config.server.retention_purge_jobs:
+ for job in hs.config.retention.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)
self.clock.looping_call(
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 404afb9402..b5968e047b 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -1489,7 +1489,7 @@ def format_user_presence_state(
The "user_id" is optional so that this function can be used to format presence
updates for client /sync responses and for federation /send requests.
"""
- content = {"presence": state.state}
+ content: JsonDict = {"presence": state.state}
if include_user_id:
content["user_id"] = state.user_id
if state.last_active_ts:
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 52b2de388f..991fee7e58 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -266,14 +266,17 @@ class UserDirectoryHandler(StateDeltasHandler):
for user_id in users_in_room:
await self.store.remove_user_who_share_room(user_id, room_id)
- # Then, re-add them to the tables.
+ # Then, re-add all remote users and some local users to the tables.
# NOTE: this is not the most efficient method, as _track_user_joined_room sets
# up local_user -> other_user and other_user_whos_local -> local_user,
-        # which when ran over an entire room, will result in the same values
+        # which, when run over an entire room, will result in the same values
# being added multiple times. The batching upserts shouldn't make this
# too bad, though.
for user_id in users_in_room:
- await self._track_user_joined_room(room_id, user_id)
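+            # Remote users are always re-added; local users only if
+            # should_include_local_user_in_dir says they belong in the
+            # directory (support users, for example, are excluded).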
+ if not self.is_mine_id(
+ user_id
+ ) or await self.store.should_include_local_user_in_dir(user_id):
+ await self._track_user_joined_room(room_id, user_id)
async def _handle_room_membership_event(
self,
@@ -364,8 +367,8 @@ class UserDirectoryHandler(StateDeltasHandler):
"""Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate.
- The caller is responsible for ensuring that the given user is not excluded
- from the user directory.
+ The caller is responsible for ensuring that the given user should be
+ included in the user directory.
"""
is_public = await self.store.is_room_world_readable_or_publicly_joinable(
room_id
@@ -412,16 +415,19 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id: The room ID that user left or stopped being public that
user_id
"""
- logger.debug("Removing user %r", user_id)
+ logger.debug("Removing user %r from room %r", user_id, room_id)
# Remove user from sharing tables
await self.store.remove_user_who_share_room(user_id, room_id)
- # Are they still in any rooms? If not, remove them entirely.
- rooms_user_is_in = await self.store.get_user_dir_rooms_user_is_in(user_id)
+ # Additionally, if they're a remote user and we're no longer joined
+ # to any rooms they're in, remove them from the user directory.
+ if not self.is_mine_id(user_id):
+ rooms_user_is_in = await self.store.get_user_dir_rooms_user_is_in(user_id)
- if len(rooms_user_is_in) == 0:
- await self.store.remove_from_user_dir(user_id)
+ if len(rooms_user_is_in) == 0:
+ logger.debug("Removing user %r from directory", user_id)
+ await self.store.remove_from_user_dir(user_id)
async def _handle_possible_remote_profile_change(
self,
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 8c80153ab6..7bae36db16 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -182,9 +182,13 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
# a logcontext which we use for processing incoming commands. We declare it as a
# background process so that the CPU stats get reported to prometheus.
- self._logging_context = BackgroundProcessLoggingContext(
- "replication-conn", self.conn_id
- )
+ with PreserveLoggingContext():
+ # thanks to `PreserveLoggingContext()`, the new logcontext is guaranteed to
+ # capture the sentinel context as its containing context and won't prevent
+ # GC of / unintentionally reactivate what would be the current context.
+ self._logging_context = BackgroundProcessLoggingContext(
+ "replication-conn", self.conn_id
+ )
def connectionMade(self):
logger.info("[%s] Connection established", self.id())
@@ -434,8 +438,12 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
if self.transport:
self.transport.unregisterProducer()
- # mark the logging context as finished
- self._logging_context.__exit__(None, None, None)
+ # mark the logging context as finished by triggering `__exit__()`
+ with PreserveLoggingContext():
+ with self._logging_context:
+ pass
+ # the sentinel context is now active, which may not be correct.
+ # PreserveLoggingContext() will restore the correct logging context.
def __str__(self):
addr = None
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py
index 062fe2f33e..8d28bd3f3f 100644
--- a/synapse/replication/tcp/redis.py
+++ b/synapse/replication/tcp/redis.py
@@ -100,9 +100,13 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
# a logcontext which we use for processing incoming commands. We declare it as a
# background process so that the CPU stats get reported to prometheus.
- self._logging_context = BackgroundProcessLoggingContext(
- "replication_command_handler"
- )
+ with PreserveLoggingContext():
+ # thanks to `PreserveLoggingContext()`, the new logcontext is guaranteed to
+ # capture the sentinel context as its containing context and won't prevent
+ # GC of / unintentionally reactivate what would be the current context.
+ self._logging_context = BackgroundProcessLoggingContext(
+ "replication_command_handler"
+ )
def connectionMade(self):
logger.info("Connected to redis")
@@ -182,8 +186,12 @@ class RedisSubscriber(txredisapi.SubscriberProtocol):
super().connectionLost(reason)
self.synapse_handler.lost_connection(self)
- # mark the logging context as finished
- self._logging_context.__exit__(None, None, None)
+ # mark the logging context as finished by triggering `__exit__()`
+ with PreserveLoggingContext():
+ with self._logging_context:
+ pass
+ # the sentinel context is now active, which may not be correct.
+ # PreserveLoggingContext() will restore the correct logging context.
def send_command(self, cmd: Command):
"""Send a command if connection has been established.
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 8ce443049e..30a687d234 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -40,7 +40,7 @@ class QuarantineMediaInRoom(RestServlet):
"""
PATTERNS = [
- *admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine"),
+ *admin_patterns("/room/(?P<room_id>[^/]+)/media/quarantine$"),
# This path kept around for legacy reasons
*admin_patterns("/quarantine_media/(?P<room_id>[^/]+)"),
]
@@ -70,7 +70,7 @@ class QuarantineMediaByUser(RestServlet):
this server.
"""
- PATTERNS = admin_patterns("/user/(?P<user_id>[^/]+)/media/quarantine")
+ PATTERNS = admin_patterns("/user/(?P<user_id>[^/]+)/media/quarantine$")
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -199,7 +199,7 @@ class UnprotectMediaByID(RestServlet):
class ListMediaInRoom(RestServlet):
"""Lists all of the media in a given room."""
- PATTERNS = admin_patterns("/room/(?P<room_id>[^/]+)/media")
+ PATTERNS = admin_patterns("/room/(?P<room_id>[^/]+)/media$")
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -219,7 +219,7 @@ class ListMediaInRoom(RestServlet):
class PurgeMediaCacheRestServlet(RestServlet):
- PATTERNS = admin_patterns("/purge_media_cache")
+ PATTERNS = admin_patterns("/purge_media_cache$")
def __init__(self, hs: "HomeServer"):
self.media_repository = hs.get_media_repository()
@@ -231,6 +231,20 @@ class PurgeMediaCacheRestServlet(RestServlet):
before_ts = parse_integer(request, "before_ts", required=True)
logger.info("before_ts: %r", before_ts)
+ if before_ts < 0:
+ raise SynapseError(
+ 400,
+ "Query parameter before_ts must be a positive integer.",
+ errcode=Codes.INVALID_PARAM,
+ )
+ elif before_ts < 30000000000: # Dec 1970 in milliseconds, Aug 2920 in seconds
+ raise SynapseError(
+ 400,
+ "Query parameter before_ts you provided is from the year 1970. "
+ + "Double check that you are providing a timestamp in milliseconds.",
+ errcode=Codes.INVALID_PARAM,
+ )
+
ret = await self.media_repository.delete_old_remote_media(before_ts)
return 200, ret
@@ -271,7 +285,7 @@ class DeleteMediaByDateSize(RestServlet):
timestamp and size.
"""
- PATTERNS = admin_patterns("/media/(?P<server_name>[^/]+)/delete")
+ PATTERNS = admin_patterns("/media/(?P<server_name>[^/]+)/delete$")
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
@@ -291,7 +305,14 @@ class DeleteMediaByDateSize(RestServlet):
if before_ts < 0:
raise SynapseError(
400,
- "Query parameter before_ts must be a string representing a positive integer.",
+ "Query parameter before_ts must be a positive integer.",
+ errcode=Codes.INVALID_PARAM,
+ )
+ elif before_ts < 30000000000: # Dec 1970 in milliseconds, Aug 2920 in seconds
+ raise SynapseError(
+ 400,
+ "Query parameter before_ts you provided is from the year 1970. "
+ + "Double check that you are providing a timestamp in milliseconds.",
errcode=Codes.INVALID_PARAM,
)
if size_gt < 0:
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index f20aa65301..c0bebc3cf0 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -35,6 +35,7 @@ from synapse.rest.admin._base import (
assert_user_is_admin,
)
from synapse.rest.client._base import client_patterns
+from synapse.storage.databases.main.registration import ExternalIDReuseException
from synapse.storage.databases.main.stats import UserSortOrder
from synapse.types import JsonDict, UserID
@@ -228,12 +229,12 @@ class UserRestServletV2(RestServlet):
if not isinstance(deactivate, bool):
raise SynapseError(400, "'deactivated' parameter is not of type boolean")
- # convert List[Dict[str, str]] into Set[Tuple[str, str]]
+ # convert List[Dict[str, str]] into List[Tuple[str, str]]
if external_ids is not None:
- new_external_ids = {
+ new_external_ids = [
(external_id["auth_provider"], external_id["external_id"])
for external_id in external_ids
- }
+ ]
# convert List[Dict[str, str]] into Set[Tuple[str, str]]
if threepids is not None:
@@ -275,28 +276,13 @@ class UserRestServletV2(RestServlet):
)
if external_ids is not None:
- # get changed external_ids (added and removed)
- cur_external_ids = set(
- await self.store.get_external_ids_by_user(user_id)
- )
- add_external_ids = new_external_ids - cur_external_ids
- del_external_ids = cur_external_ids - new_external_ids
-
- # remove old external_ids
- for auth_provider, external_id in del_external_ids:
- await self.store.remove_user_external_id(
- auth_provider,
- external_id,
- user_id,
- )
-
- # add new external_ids
- for auth_provider, external_id in add_external_ids:
- await self.store.record_user_external_id(
- auth_provider,
- external_id,
+ try:
+ await self.store.replace_user_external_id(
+ new_external_ids,
user_id,
)
+ except ExternalIDReuseException:
+ raise SynapseError(409, "External id is already in use.")
if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
@@ -384,12 +370,15 @@ class UserRestServletV2(RestServlet):
)
if external_ids is not None:
- for auth_provider, external_id in new_external_ids:
- await self.store.record_user_external_id(
- auth_provider,
- external_id,
- user_id,
- )
+ try:
+ for auth_provider, external_id in new_external_ids:
+ await self.store.record_user_external_id(
+ auth_provider,
+ external_id,
+ user_id,
+ )
+ except ExternalIDReuseException:
+ raise SynapseError(409, "External id is already in use.")
if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index ffe6207a3c..6edadea550 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -332,7 +332,7 @@ def _resolve_auth_events(
event_auth.check_auth_rules_for_event(
RoomVersions.V1,
event,
- auth_events,
+ auth_events.values(),
)
prev_event = event
except AuthError:
@@ -350,7 +350,7 @@ def _resolve_normal_events(
event_auth.check_auth_rules_for_event(
RoomVersions.V1,
event,
- auth_events,
+ auth_events.values(),
)
return event
except AuthError:
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index bd18eefd58..c618df2fde 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -549,7 +549,7 @@ async def _iterative_auth_checks(
event_auth.check_auth_rules_for_event(
room_version,
event,
- auth_events,
+ auth_events.values(),
)
resolved_state[(event.type, event.state_key)] = event_id
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 2cbc634370..4a4d35f77c 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -908,7 +908,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
desc="get_latest_event_ids_in_room",
)
- async def get_min_depth(self, room_id: str) -> int:
+ async def get_min_depth(self, room_id: str) -> Optional[int]:
"""For the given room, get the minimum depth we have seen for it."""
return await self.db_pool.runInteraction(
"get_min_depth", self._get_min_depth_interaction, room_id
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 4a1a2f4a6a..ae37901be9 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -55,8 +55,9 @@ from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import BackfillStream
from synapse.replication.tcp.streams.events import EventsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
-from synapse.storage.database import DatabasePool
+from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.engines import PostgresEngine
+from synapse.storage.types import Connection
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
@@ -86,6 +87,47 @@ class _EventCacheEntry:
redacted_event: Optional[EventBase]
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class _EventRow:
+ """
+ An event, as pulled from the database.
+
+ Properties:
+ event_id: The event ID of the event.
+
+ stream_ordering: stream ordering for this event
+
+ json: json-encoded event structure
+
+ internal_metadata: json-encoded internal metadata dict
+
+ format_version: The format of the event. Hopefully one of EventFormatVersions.
+ 'None' means the event predates EventFormatVersions (so the event is format V1).
+
+ room_version_id: The version of the room which contains the event. Hopefully
+ one of RoomVersions.
+
+ Due to historical reasons, there may be a few events in the database which
+ do not have an associated room; in this case None will be returned here.
+
+ rejected_reason: if the event was rejected, the reason why.
+
+ redactions: a list of event-ids which (claim to) redact this event.
+
+ outlier: True if this event is an outlier.
+ """
+
+ event_id: str
+ stream_ordering: int
+ json: str
+ internal_metadata: str
+ format_version: Optional[int]
+    room_version_id: Optional[str]
+ rejected_reason: Optional[str]
+ redactions: List[str]
+ outlier: bool
+
+
class EventRedactBehaviour(Names):
"""
What to do when retrieving a redacted event from the database.
@@ -686,7 +728,7 @@ class EventsWorkerStore(SQLBaseStore):
for e in state_to_include.values()
]
- def _do_fetch(self, conn):
+ def _do_fetch(self, conn: Connection) -> None:
"""Takes a database connection and waits for requests for events from
the _event_fetch_list queue.
"""
@@ -713,13 +755,15 @@ class EventsWorkerStore(SQLBaseStore):
self._fetch_event_list(conn, event_list)
- def _fetch_event_list(self, conn, event_list):
+ def _fetch_event_list(
+ self, conn: Connection, event_list: List[Tuple[List[str], defer.Deferred]]
+ ) -> None:
"""Handle a load of requests from the _event_fetch_list queue
Args:
- conn (twisted.enterprise.adbapi.Connection): database connection
+ conn: database connection
- event_list (list[Tuple[list[str], Deferred]]):
+ event_list:
The fetch requests. Each entry consists of a list of event
ids to be fetched, and a deferred to be completed once the
events have been fetched.
@@ -788,7 +832,7 @@ class EventsWorkerStore(SQLBaseStore):
row = row_map.get(event_id)
fetched_events[event_id] = row
if row:
- redaction_ids.update(row["redactions"])
+ redaction_ids.update(row.redactions)
events_to_fetch = redaction_ids.difference(fetched_events.keys())
if events_to_fetch:
@@ -799,32 +843,32 @@ class EventsWorkerStore(SQLBaseStore):
for event_id, row in fetched_events.items():
if not row:
continue
- assert row["event_id"] == event_id
+ assert row.event_id == event_id
- rejected_reason = row["rejected_reason"]
+ rejected_reason = row.rejected_reason
# If the event or metadata cannot be parsed, log the error and act
# as if the event is unknown.
try:
- d = db_to_json(row["json"])
+ d = db_to_json(row.json)
except ValueError:
logger.error("Unable to parse json from event: %s", event_id)
continue
try:
- internal_metadata = db_to_json(row["internal_metadata"])
+ internal_metadata = db_to_json(row.internal_metadata)
except ValueError:
logger.error(
"Unable to parse internal_metadata from event: %s", event_id
)
continue
- format_version = row["format_version"]
+ format_version = row.format_version
if format_version is None:
# This means that we stored the event before we had the concept
-                # of a event format version, so it must be a V1 event.
+                # of an event format version, so it must be a V1 event.
format_version = EventFormatVersions.V1
- room_version_id = row["room_version_id"]
+ room_version_id = row.room_version_id
if not room_version_id:
# this should only happen for out-of-band membership events which
@@ -889,8 +933,8 @@ class EventsWorkerStore(SQLBaseStore):
internal_metadata_dict=internal_metadata,
rejected_reason=rejected_reason,
)
- original_ev.internal_metadata.stream_ordering = row["stream_ordering"]
- original_ev.internal_metadata.outlier = row["outlier"]
+ original_ev.internal_metadata.stream_ordering = row.stream_ordering
+ original_ev.internal_metadata.outlier = row.outlier
event_map[event_id] = original_ev
@@ -898,7 +942,7 @@ class EventsWorkerStore(SQLBaseStore):
# the cache entries.
result_map = {}
for event_id, original_ev in event_map.items():
- redactions = fetched_events[event_id]["redactions"]
+ redactions = fetched_events[event_id].redactions
redacted_event = self._maybe_redact_event_row(
original_ev, redactions, event_map
)
@@ -912,17 +956,17 @@ class EventsWorkerStore(SQLBaseStore):
return result_map
- async def _enqueue_events(self, events):
+ async def _enqueue_events(self, events: Iterable[str]) -> Dict[str, _EventRow]:
"""Fetches events from the database using the _event_fetch_list. This
allows batch and bulk fetching of events - it allows us to fetch events
without having to create a new transaction for each request for events.
Args:
- events (Iterable[str]): events to be fetched.
+ events: events to be fetched.
Returns:
- Dict[str, Dict]: map from event id to row data from the database.
- May contain events that weren't requested.
+ A map from event id to row data from the database. May contain events
+ that weren't requested.
"""
events_d = defer.Deferred()
@@ -949,43 +993,19 @@ class EventsWorkerStore(SQLBaseStore):
return row_map
- def _fetch_event_rows(self, txn, event_ids):
+ def _fetch_event_rows(
+ self, txn: LoggingTransaction, event_ids: Iterable[str]
+ ) -> Dict[str, _EventRow]:
"""Fetch event rows from the database
Events which are not found are omitted from the result.
- The returned per-event dicts contain the following keys:
-
- * event_id (str)
-
- * stream_ordering (int): stream ordering for this event
-
- * json (str): json-encoded event structure
-
- * internal_metadata (str): json-encoded internal metadata dict
-
- * format_version (int|None): The format of the event. Hopefully one
- of EventFormatVersions. 'None' means the event predates
- EventFormatVersions (so the event is format V1).
-
- * room_version_id (str|None): The version of the room which contains the event.
- Hopefully one of RoomVersions.
-
- Due to historical reasons, there may be a few events in the database which
- do not have an associated room; in this case None will be returned here.
-
- * rejected_reason (str|None): if the event was rejected, the reason
- why.
-
- * redactions (List[str]): a list of event-ids which (claim to) redact
- this event.
-
Args:
- txn (twisted.enterprise.adbapi.Connection):
- event_ids (Iterable[str]): event IDs to fetch
+ txn: The database transaction.
+ event_ids: event IDs to fetch
Returns:
- Dict[str, Dict]: a map from event id to event info.
+ A map from event id to event info.
"""
event_dict = {}
for evs in batch_iter(event_ids, 200):
@@ -1013,17 +1033,17 @@ class EventsWorkerStore(SQLBaseStore):
for row in txn:
event_id = row[0]
- event_dict[event_id] = {
- "event_id": event_id,
- "stream_ordering": row[1],
- "internal_metadata": row[2],
- "json": row[3],
- "format_version": row[4],
- "room_version_id": row[5],
- "rejected_reason": row[6],
- "redactions": [],
- "outlier": row[7],
- }
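+                # NB: the positional indices below must match the column order
+                # of the SELECT that produced these rows.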
+ event_dict[event_id] = _EventRow(
+ event_id=event_id,
+ stream_ordering=row[1],
+ internal_metadata=row[2],
+ json=row[3],
+ format_version=row[4],
+ room_version_id=row[5],
+ rejected_reason=row[6],
+ redactions=[],
+ outlier=row[7],
+ )
# check for redactions
redactions_sql = "SELECT event_id, redacts FROM redactions WHERE "
@@ -1035,7 +1055,7 @@ class EventsWorkerStore(SQLBaseStore):
for (redacter, redacted) in txn:
d = event_dict.get(redacted)
if d:
- d["redactions"].append(redacter)
+ d.redactions.append(redacter)
return event_dict
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 181841ee06..37d47aa823 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -23,7 +23,11 @@ import attr
from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError
from synapse.metrics.background_process_metrics import wrap_as_background_process
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import (
+ DatabasePool,
+ LoggingDatabaseConnection,
+ LoggingTransaction,
+)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.types import Cursor
@@ -40,6 +44,13 @@ THIRTY_MINUTES_IN_MS = 30 * 60 * 1000
logger = logging.getLogger(__name__)
+class ExternalIDReuseException(Exception):
+ """Exception if writing an external id for a user fails,
+ because this external id is given to an other user."""
+
+ pass
+
+
@attr.s(frozen=True, slots=True)
class TokenLookupResult:
"""Result of looking up an access token.
@@ -588,24 +599,44 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
auth_provider: identifier for the remote auth provider
external_id: id on that system
user_id: complete mxid that it is mapped to
+ Raises:
+ ExternalIDReuseException if the new external_id could not be mapped.
"""
- await self.db_pool.simple_insert(
+
+ try:
+ await self.db_pool.runInteraction(
+ "record_user_external_id",
+ self._record_user_external_id_txn,
+ auth_provider,
+ external_id,
+ user_id,
+ )
+ except self.database_engine.module.IntegrityError:
+ raise ExternalIDReuseException()
+
+ def _record_user_external_id_txn(
+ self,
+ txn: LoggingTransaction,
+ auth_provider: str,
+ external_id: str,
+ user_id: str,
+ ) -> None:
+
+ self.db_pool.simple_insert_txn(
+ txn,
table="user_external_ids",
values={
"auth_provider": auth_provider,
"external_id": external_id,
"user_id": user_id,
},
- desc="record_user_external_id",
)
async def remove_user_external_id(
self, auth_provider: str, external_id: str, user_id: str
) -> None:
"""Remove a mapping from an external user id to a mxid
-
If the mapping is not found, this method does nothing.
-
Args:
auth_provider: identifier for the remote auth provider
external_id: id on that system
@@ -621,6 +652,60 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
desc="remove_user_external_id",
)
+ async def replace_user_external_id(
+ self,
+ record_external_ids: List[Tuple[str, str]],
+ user_id: str,
+ ) -> None:
+ """Replace mappings from external user ids to a mxid in a single transaction.
+ All mappings are deleted and the new ones are created.
+
+ Args:
+ record_external_ids:
+                List of (auth_provider, external_id) tuples to record
+ user_id: complete mxid that it is mapped to
+ Raises:
+ ExternalIDReuseException if the new external_id could not be mapped.
+ """
+
+ def _remove_user_external_ids_txn(
+ txn: LoggingTransaction,
+ user_id: str,
+ ) -> None:
+ """Remove all mappings from external user ids to a mxid
+ If these mappings are not found, this method does nothing.
+
+ Args:
+ user_id: complete mxid that it is mapped to
+ """
+
+ self.db_pool.simple_delete_txn(
+ txn,
+ table="user_external_ids",
+ keyvalues={"user_id": user_id},
+ )
+
+ def _replace_user_external_id_txn(
+ txn: LoggingTransaction,
+ ):
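+            # Delete every existing mapping for this user, then re-create the
+            # requested set. If an inserted external id is already mapped to
+            # a different user, the table's unique constraint makes the
+            # insert raise IntegrityError, which is translated into
+            # ExternalIDReuseException below.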
+ _remove_user_external_ids_txn(txn, user_id)
+
+ for auth_provider, external_id in record_external_ids:
+ self._record_user_external_id_txn(
+ txn,
+ auth_provider,
+ external_id,
+ user_id,
+ )
+
+ try:
+ await self.db_pool.runInteraction(
+ "replace_user_external_id",
+ _replace_user_external_id_txn,
+ )
+ except self.database_engine.module.IntegrityError:
+ raise ExternalIDReuseException()
+
async def get_user_by_external_id(
self, auth_provider: str, external_id: str
) -> Optional[str]:
@@ -2237,7 +2322,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
# accident.
row = {"client_secret": None, "validated_at": None}
else:
- raise ThreepidValidationError(400, "Unknown session_id")
+ raise ThreepidValidationError("Unknown session_id")
retrieved_client_secret = row["client_secret"]
validated_at = row["validated_at"]
@@ -2252,14 +2337,14 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
if not row:
raise ThreepidValidationError(
- 400, "Validation token not found or has expired"
+ "Validation token not found or has expired"
)
expires = row["expires"]
next_link = row["next_link"]
if retrieved_client_secret != client_secret:
raise ThreepidValidationError(
- 400, "This client_secret does not match the provided session_id"
+ "This client_secret does not match the provided session_id"
)
# If the session is already validated, no need to revalidate
@@ -2268,7 +2353,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
if expires <= current_ts:
raise ThreepidValidationError(
- 400, "This token has expired. Please request a new one"
+ "This token has expired. Please request a new one"
)
# Looks good. Validate the session
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index d69eaf80ce..835d7889cb 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -679,8 +679,8 @@ class RoomWorkerStore(SQLBaseStore):
# policy.
if not ret:
return {
- "min_lifetime": self.config.server.retention_default_min_lifetime,
- "max_lifetime": self.config.server.retention_default_max_lifetime,
+ "min_lifetime": self.config.retention.retention_default_min_lifetime,
+ "max_lifetime": self.config.retention.retention_default_max_lifetime,
}
row = ret[0]
@@ -690,10 +690,10 @@ class RoomWorkerStore(SQLBaseStore):
# The default values will be None if no default policy has been defined, or if one
# of the attributes is missing from the default policy.
if row["min_lifetime"] is None:
- row["min_lifetime"] = self.config.server.retention_default_min_lifetime
+ row["min_lifetime"] = self.config.retention.retention_default_min_lifetime
if row["max_lifetime"] is None:
- row["max_lifetime"] = self.config.server.retention_default_max_lifetime
+ row["max_lifetime"] = self.config.retention.retention_default_max_lifetime
return row
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 0120b4688b..b9ad92b977 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -109,18 +109,14 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
tok=alice_token,
)
- users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
- in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
- in_private = self.get_success(
- self.user_dir_helper.get_users_who_share_private_rooms()
+ # The user directory should reflect the room memberships above.
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
)
-
self.assertEqual(users, {alice, bob})
+ self.assertEqual(in_public, {(alice, public), (bob, public), (alice, public2)})
self.assertEqual(
- set(in_public), {(alice, public), (bob, public), (alice, public2)}
- )
- self.assertEqual(
- self.user_dir_helper._compress_shared(in_private),
+ in_private,
{(alice, bob, private), (bob, alice, private)},
)
@@ -209,6 +205,88 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
self.assertEqual(set(in_public), {(user1, room), (user2, room)})
+ def test_excludes_users_when_making_room_public(self) -> None:
+ # Create a regular user and a support user.
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ support = "@support1:test"
+ self.get_success(
+ self.store.register_user(
+ user_id=support, password_hash=None, user_type=UserTypes.SUPPORT
+ )
+ )
+
+ # Make a public and private room containing Alice and the support user
+ public, initially_private = self._create_rooms_and_inject_memberships(
+ alice, alice_token, support
+ )
+ self._check_only_one_user_in_directory(alice, public)
+
+ # Alice makes the private room public.
+ self.helper.send_state(
+ initially_private,
+ "m.room.join_rules",
+ {"join_rule": "public"},
+ tok=alice_token,
+ )
+
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
+ self.assertEqual(users, {alice})
+ self.assertEqual(in_public, {(alice, public), (alice, initially_private)})
+ self.assertEqual(in_private, set())
+
+ def test_switching_from_private_to_public_to_private(self) -> None:
+ """Check we update the room sharing tables when switching a room
+ from private to public, then back again to private."""
+ # Alice and Bob share a private room.
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ bob = self.register_user("bob", "pass")
+ bob_token = self.login(bob, "pass")
+ room = self.helper.create_room_as(alice, is_public=False, tok=alice_token)
+ self.helper.invite(room, alice, bob, tok=alice_token)
+ self.helper.join(room, bob, tok=bob_token)
+
+ # The user directory should reflect this.
+ def check_user_dir_for_private_room() -> None:
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
+ self.assertEqual(users, {alice, bob})
+ self.assertEqual(in_public, set())
+ self.assertEqual(in_private, {(alice, bob, room), (bob, alice, room)})
+
+ check_user_dir_for_private_room()
+
+ # Alice makes the room public.
+ self.helper.send_state(
+ room,
+ "m.room.join_rules",
+ {"join_rule": "public"},
+ tok=alice_token,
+ )
+
+ # The user directory should be updated accordingly
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
+ self.assertEqual(users, {alice, bob})
+ self.assertEqual(in_public, {(alice, room), (bob, room)})
+ self.assertEqual(in_private, set())
+
+ # Alice makes the room private.
+ self.helper.send_state(
+ room,
+ "m.room.join_rules",
+ {"join_rule": "invite"},
+ tok=alice_token,
+ )
+
+ # The user directory should be updated accordingly
+ check_user_dir_for_private_room()
+
def _create_rooms_and_inject_memberships(
self, creator: str, token: str, joiner: str
) -> Tuple[str, str]:
@@ -232,15 +310,18 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
return public_room, private_room
def _check_only_one_user_in_directory(self, user: str, public: str) -> None:
- users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
- in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
- in_private = self.get_success(
- self.user_dir_helper.get_users_who_share_private_rooms()
- )
+ """Check that the user directory DB tables show that:
+ - only one user is in the user directory
+ - they belong to exactly one public room
+ - they don't share a private room with anyone.
+ """
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
self.assertEqual(users, {user})
- self.assertEqual(set(in_public), {(user, public)})
- self.assertEqual(in_private, [])
+ self.assertEqual(in_public, {(user, public)})
+ self.assertEqual(in_private, set())
def test_handle_local_profile_change_with_support_user(self) -> None:
support_user_id = "@support:test"
@@ -581,11 +662,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.user_dir_helper.get_users_in_public_rooms()
)
- self.assertEqual(
- self.user_dir_helper._compress_shared(shares_private),
- {(u1, u2, room), (u2, u1, room)},
- )
- self.assertEqual(public_users, [])
+ self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)})
+ self.assertEqual(public_users, set())
# We get one search result when searching for user2 by user1.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
@@ -610,8 +688,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.user_dir_helper.get_users_in_public_rooms()
)
- self.assertEqual(self.user_dir_helper._compress_shared(shares_private), set())
- self.assertEqual(public_users, [])
+ self.assertEqual(shares_private, set())
+ self.assertEqual(public_users, set())
# User1 now gets no search results for any of the other users.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
@@ -645,11 +723,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.user_dir_helper.get_users_in_public_rooms()
)
- self.assertEqual(
- self.user_dir_helper._compress_shared(shares_private),
- {(u1, u2, room), (u2, u1, room)},
- )
- self.assertEqual(public_users, [])
+ self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)})
+ self.assertEqual(public_users, set())
# We get one search result when searching for user2 by user1.
s = self.get_success(self.handler.search_users(u1, "user2", 10))
@@ -704,11 +779,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.user_dir_helper.get_users_in_public_rooms()
)
- self.assertEqual(
- self.user_dir_helper._compress_shared(shares_private),
- {(u1, u2, room), (u2, u1, room)},
- )
- self.assertEqual(public_users, [])
+ self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)})
+ self.assertEqual(public_users, set())
# Configure a spam checker.
spam_checker = self.hs.get_spam_checker()
@@ -740,8 +812,8 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
)
# No users share rooms
- self.assertEqual(public_users, [])
- self.assertEqual(self.user_dir_helper._compress_shared(shares_private), set())
+ self.assertEqual(public_users, set())
+ self.assertEqual(shares_private, set())
# Despite not sharing a room, search_all_users means we get a search
# result.
@@ -842,6 +914,56 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase):
self.hs.get_storage().persistence.persist_event(event, context)
)
+ def test_local_user_leaving_room_remains_in_user_directory(self) -> None:
+ """We've chosen to simplify the user directory's implementation by
+ always including local users. Ensure this invariant is maintained when
+ a local user
+ - leaves a room, and
+ - leaves the last room they're in which is visible to this server.
+
+ This is user-visible if the "search_all_users" config option is on: the
+ local user who left a room would no longer be searchable if this test fails!
+ """
+ alice = self.register_user("alice", "pass")
+ alice_token = self.login(alice, "pass")
+ bob = self.register_user("bob", "pass")
+ bob_token = self.login(bob, "pass")
+
+ # Alice makes two public rooms, which Bob joins.
+ room1 = self.helper.create_room_as(alice, is_public=True, tok=alice_token)
+ room2 = self.helper.create_room_as(alice, is_public=True, tok=alice_token)
+ self.helper.join(room1, bob, tok=bob_token)
+ self.helper.join(room2, bob, tok=bob_token)
+
+ # The user directory tables are updated.
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
+ self.assertEqual(users, {alice, bob})
+ self.assertEqual(
+ in_public, {(alice, room1), (alice, room2), (bob, room1), (bob, room2)}
+ )
+ self.assertEqual(in_private, set())
+
+ # Alice leaves one room. She should still be in the directory.
+ self.helper.leave(room1, alice, tok=alice_token)
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
+ self.assertEqual(users, {alice, bob})
+ self.assertEqual(in_public, {(alice, room2), (bob, room1), (bob, room2)})
+ self.assertEqual(in_private, set())
+
+ # Alice leaves the other. She should still be in the directory.
+ self.helper.leave(room2, alice, tok=alice_token)
+ self.wait_for_background_updates()
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
+ )
+ self.assertEqual(users, {alice, bob})
+ self.assertEqual(in_public, {(bob, room1), (bob, room2)})
+ self.assertEqual(in_private, set())
+
class TestUserDirSearchDisabled(unittest.HomeserverTestCase):
servlets = [
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index ce30a19213..db0e78c039 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -27,6 +27,9 @@ from tests import unittest
from tests.server import FakeSite, make_request
from tests.test_utils import SMALL_PNG
+VALID_TIMESTAMP = 1609459200000 # 2021-01-01 in milliseconds
+INVALID_TIMESTAMP_IN_S = 1893456000 # 2030-01-01 in seconds
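+# The endpoints reject any before_ts below 30_000_000_000 as "probably given
+# in seconds rather than milliseconds", so this value exercises that check.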
+
class DeleteMediaByIDTestCase(unittest.HomeserverTestCase):
@@ -203,6 +206,9 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.filepaths = MediaFilePaths(hs.config.media.media_store_path)
self.url = "/_synapse/admin/v1/media/%s/delete" % self.server_name
+        # Advance the clock to a somewhat realistic time
+ self.reactor.advance(1000000000)
+
def test_no_auth(self):
"""
Try to delete media without authentication.
@@ -237,7 +243,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- url + "?before_ts=1234",
+ url + f"?before_ts={VALID_TIMESTAMP}",
access_token=self.admin_user_tok,
)
@@ -273,13 +279,27 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
self.assertEqual(
- "Query parameter before_ts must be a string representing a positive integer.",
+ "Query parameter before_ts must be a positive integer.",
channel.json_body["error"],
)
channel = self.make_request(
"POST",
- self.url + "?before_ts=1234&size_gt=-1234",
+ self.url + f"?before_ts={INVALID_TIMESTAMP_IN_S}",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+ self.assertEqual(
+ "Query parameter before_ts you provided is from the year 1970. "
+ + "Double check that you are providing a timestamp in milliseconds.",
+ channel.json_body["error"],
+ )
+
+ channel = self.make_request(
+ "POST",
+ self.url + f"?before_ts={VALID_TIMESTAMP}&size_gt=-1234",
access_token=self.admin_user_tok,
)
@@ -292,7 +312,7 @@ class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- self.url + "?before_ts=1234&keep_profiles=not_bool",
+ self.url + f"?before_ts={VALID_TIMESTAMP}&keep_profiles=not_bool",
access_token=self.admin_user_tok,
)
@@ -767,3 +787,81 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
media_info = self.get_success(self.store.get_local_media(self.media_id))
self.assertFalse(media_info["safe_from_quarantine"])
+
+
+class PurgeMediaCacheTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ synapse.rest.admin.register_servlets_for_media_repo,
+ login.register_servlets,
+ profile.register_servlets,
+ room.register_servlets,
+ ]
+
+ def prepare(self, reactor, clock, hs):
+ self.media_repo = hs.get_media_repository_resource()
+ self.server_name = hs.hostname
+
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.filepaths = MediaFilePaths(hs.config.media.media_store_path)
+ self.url = "/_synapse/admin/v1/purge_media_cache"
+
+ def test_no_auth(self):
+ """
+        Try to purge the media cache without authentication.
+ """
+
+ channel = self.make_request("POST", self.url, b"{}")
+
+ self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+ def test_requester_is_not_admin(self):
+ """
+ If the user is not a server admin, an error is returned.
+ """
+ self.other_user = self.register_user("user", "pass")
+ self.other_user_token = self.login("user", "pass")
+
+ channel = self.make_request(
+ "POST",
+ self.url,
+ access_token=self.other_user_token,
+ )
+
+ self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+ def test_invalid_parameter(self):
+ """
+ If parameters are invalid, an error is returned.
+ """
+ channel = self.make_request(
+ "POST",
+ self.url + "?before_ts=-1234",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+ self.assertEqual(
+ "Query parameter before_ts must be a positive integer.",
+ channel.json_body["error"],
+ )
+
+ channel = self.make_request(
+ "POST",
+ self.url + f"?before_ts={INVALID_TIMESTAMP_IN_S}",
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+ self.assertEqual(
+ "Query parameter before_ts you provided is from the year 1970. "
+ + "Double check that you are providing a timestamp in milliseconds.",
+ channel.json_body["error"],
+ )
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index c9e2754b09..839442ddba 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1180,9 +1180,8 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.other_user, device_id=None, valid_until_ms=None
)
)
- self.url_other_user = "/_synapse/admin/v2/users/%s" % urllib.parse.quote(
- self.other_user
- )
+ self.url_prefix = "/_synapse/admin/v2/users/%s"
+ self.url_other_user = self.url_prefix % self.other_user
def test_requester_is_no_admin(self):
"""
@@ -1738,6 +1737,93 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual(0, len(channel.json_body["threepids"]))
self._check_fields(channel.json_body)
+ def test_set_duplicate_threepid(self):
+ """
+        Test setting the same threepid for a second user.
+        The first user loses the threepid and the second user gains it.
+ """
+
+ # create a user to set a threepid
+ first_user = self.register_user("first_user", "pass")
+ url_first_user = self.url_prefix % first_user
+
+        # Add a threepid to first_user
+ channel = self.make_request(
+ "PUT",
+ url_first_user,
+ access_token=self.admin_user_tok,
+ content={
+ "threepids": [
+ {"medium": "email", "address": "bob1@bob.bob"},
+ ],
+ },
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(first_user, channel.json_body["name"])
+ self.assertEqual(1, len(channel.json_body["threepids"]))
+ self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+ self.assertEqual("bob1@bob.bob", channel.json_body["threepids"][0]["address"])
+ self._check_fields(channel.json_body)
+
+        # Add a threepid to the other user
+ channel = self.make_request(
+ "PUT",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ content={
+ "threepids": [
+ {"medium": "email", "address": "bob2@bob.bob"},
+ ],
+ },
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(1, len(channel.json_body["threepids"]))
+ self.assertEqual("email", channel.json_body["threepids"][0]["medium"])
+ self.assertEqual("bob2@bob.bob", channel.json_body["threepids"][0]["address"])
+ self._check_fields(channel.json_body)
+
+        # Add two new threepids to the other user;
+        # one of them is already used by first_user
+ channel = self.make_request(
+ "PUT",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ content={
+ "threepids": [
+ {"medium": "email", "address": "bob1@bob.bob"},
+ {"medium": "email", "address": "bob3@bob.bob"},
+ ],
+ },
+ )
+
+        # the other user now has these two threepids
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(2, len(channel.json_body["threepids"]))
+        # the result is not guaranteed to be sorted, so sort it before comparing
+ sorted_result = sorted(
+ channel.json_body["threepids"], key=lambda k: k["address"]
+ )
+ self.assertEqual("email", sorted_result[0]["medium"])
+ self.assertEqual("bob1@bob.bob", sorted_result[0]["address"])
+ self.assertEqual("email", sorted_result[1]["medium"])
+ self.assertEqual("bob3@bob.bob", sorted_result[1]["address"])
+ self._check_fields(channel.json_body)
+
+        # first_user no longer has any threepids
+ channel = self.make_request(
+ "GET",
+ url_first_user,
+ access_token=self.admin_user_tok,
+ )
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(first_user, channel.json_body["name"])
+ self.assertEqual(0, len(channel.json_body["threepids"]))
+ self._check_fields(channel.json_body)
+
def test_set_external_id(self):
"""
Test setting external id for an other user.
@@ -1836,6 +1922,129 @@ class UserRestTestCase(unittest.HomeserverTestCase):
self.assertEqual("@user:test", channel.json_body["name"])
self.assertEqual(0, len(channel.json_body["external_ids"]))
+ def test_set_duplicate_external_id(self):
+ """
+ Test that setting the same external id for a second user fails and
+        that the existing external ids remain unchanged.
+ """
+
+ # create a user to use an external id
+ first_user = self.register_user("first_user", "pass")
+ url_first_user = self.url_prefix % first_user
+
+ # Add an external id to first user
+ channel = self.make_request(
+ "PUT",
+ url_first_user,
+ access_token=self.admin_user_tok,
+ content={
+ "external_ids": [
+ {
+ "external_id": "external_id1",
+ "auth_provider": "auth_provider",
+ },
+ ],
+ },
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(first_user, channel.json_body["name"])
+ self.assertEqual(1, len(channel.json_body["external_ids"]))
+ self.assertEqual(
+ "external_id1", channel.json_body["external_ids"][0]["external_id"]
+ )
+ self.assertEqual(
+ "auth_provider", channel.json_body["external_ids"][0]["auth_provider"]
+ )
+ self._check_fields(channel.json_body)
+
+ # Add an external id to other user
+ channel = self.make_request(
+ "PUT",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ content={
+ "external_ids": [
+ {
+ "external_id": "external_id2",
+ "auth_provider": "auth_provider",
+ },
+ ],
+ },
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(1, len(channel.json_body["external_ids"]))
+ self.assertEqual(
+ "external_id2", channel.json_body["external_ids"][0]["external_id"]
+ )
+ self.assertEqual(
+ "auth_provider", channel.json_body["external_ids"][0]["auth_provider"]
+ )
+ self._check_fields(channel.json_body)
+
+        # Add two new external_ids to the other user;
+        # one of them is already used by first_user
+ channel = self.make_request(
+ "PUT",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ content={
+ "external_ids": [
+ {
+ "external_id": "external_id1",
+ "auth_provider": "auth_provider",
+ },
+ {
+ "external_id": "external_id3",
+ "auth_provider": "auth_provider",
+ },
+ ],
+ },
+ )
+
+ # must fail
+ self.assertEqual(409, channel.code, msg=channel.json_body)
+ self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"])
+ self.assertEqual("External id is already in use.", channel.json_body["error"])
+
+        # the other user must not have changed
+ channel = self.make_request(
+ "GET",
+ self.url_other_user,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual("@user:test", channel.json_body["name"])
+ self.assertEqual(1, len(channel.json_body["external_ids"]))
+ self.assertEqual(
+ "external_id2", channel.json_body["external_ids"][0]["external_id"]
+ )
+ self.assertEqual(
+ "auth_provider", channel.json_body["external_ids"][0]["auth_provider"]
+ )
+ self._check_fields(channel.json_body)
+
+        # first_user must not have changed
+ channel = self.make_request(
+ "GET",
+ url_first_user,
+ access_token=self.admin_user_tok,
+ )
+
+ self.assertEqual(200, channel.code, msg=channel.json_body)
+ self.assertEqual(first_user, channel.json_body["name"])
+ self.assertEqual(1, len(channel.json_body["external_ids"]))
+ self.assertEqual(
+ "external_id1", channel.json_body["external_ids"][0]["external_id"]
+ )
+ self.assertEqual(
+ "auth_provider", channel.json_body["external_ids"][0]["auth_provider"]
+ )
+ self._check_fields(channel.json_body)
+
def test_deactivate_user(self):
"""
Test deactivating another user.
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index be3ed64f5e..37cf7bb232 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from typing import Any, Dict, List, Set, Tuple
+from typing import Any, Dict, Set, Tuple
from unittest import mock
from unittest.mock import Mock, patch
@@ -42,18 +42,7 @@ class GetUserDirectoryTables:
def __init__(self, store: DataStore):
self.store = store
- def _compress_shared(
- self, shared: List[Dict[str, str]]
- ) -> Set[Tuple[str, str, str]]:
- """
- Compress a list of users who share rooms dicts to a list of tuples.
- """
- r = set()
- for i in shared:
- r.add((i["user_id"], i["other_user_id"], i["room_id"]))
- return r
-
- async def get_users_in_public_rooms(self) -> List[Tuple[str, str]]:
+ async def get_users_in_public_rooms(self) -> Set[Tuple[str, str]]:
"""Fetch the entire `users_in_public_rooms` table.
-        Returns a list of tuples (user_id, room_id) where room_id is public and
+        Returns a set of tuples (user_id, room_id) where room_id is public and
@@ -63,24 +52,27 @@ class GetUserDirectoryTables:
"users_in_public_rooms", None, ("user_id", "room_id")
)
- retval = []
+ retval = set()
for i in r:
- retval.append((i["user_id"], i["room_id"]))
+ retval.add((i["user_id"], i["room_id"]))
return retval
- async def get_users_who_share_private_rooms(self) -> List[Dict[str, str]]:
+ async def get_users_who_share_private_rooms(self) -> Set[Tuple[str, str, str]]:
"""Fetch the entire `users_who_share_private_rooms` table.
- Returns a dict containing "user_id", "other_user_id" and "room_id" keys.
- The dicts can be flattened to Tuples with the `_compress_shared` method.
- (This seems a little awkward---maybe we could clean this up.)
+ Returns a set of tuples (user_id, other_user_id, room_id) corresponding
+ to the rows of `users_who_share_private_rooms`.
"""
- return await self.store.db_pool.simple_select_list(
+ rows = await self.store.db_pool.simple_select_list(
"users_who_share_private_rooms",
None,
["user_id", "other_user_id", "room_id"],
)
+ rv = set()
+ for row in rows:
+ rv.add((row["user_id"], row["other_user_id"], row["room_id"]))
+ return rv
async def get_users_in_user_directory(self) -> Set[str]:
"""Fetch the set of users in the `user_directory` table.
@@ -113,6 +105,16 @@ class GetUserDirectoryTables:
for row in rows
}
+ async def get_tables(
+ self,
+ ) -> Tuple[Set[str], Set[Tuple[str, str]], Set[Tuple[str, str, str]]]:
+ """Multiple tests want to inspect these tables, so expose them together."""
+ return (
+ await self.get_users_in_user_directory(),
+ await self.get_users_in_public_rooms(),
+ await self.get_users_who_share_private_rooms(),
+ )
+
class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
"""Ensure that rebuilding the directory writes the correct data to the DB.
@@ -166,8 +168,8 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
)
# Nothing updated yet
- self.assertEqual(shares_private, [])
- self.assertEqual(public_users, [])
+ self.assertEqual(shares_private, set())
+ self.assertEqual(public_users, set())
# Ugh, have to reset this flag
self.store.db_pool.updates._all_done = False
@@ -236,24 +238,15 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
# Do the initial population of the user directory via the background update
self._purge_and_rebuild_user_dir()
- shares_private = self.get_success(
- self.user_dir_helper.get_users_who_share_private_rooms()
- )
- public_users = self.get_success(
- self.user_dir_helper.get_users_in_public_rooms()
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
)
# User 1 and User 2 are in the same public room
- self.assertEqual(set(public_users), {(u1, room), (u2, room)})
-
+ self.assertEqual(in_public, {(u1, room), (u2, room)})
# User 1 and User 3 share private rooms
- self.assertEqual(
- self.user_dir_helper._compress_shared(shares_private),
- {(u1, u3, private_room), (u3, u1, private_room)},
- )
-
+ self.assertEqual(in_private, {(u1, u3, private_room), (u3, u1, private_room)})
# All three should have entries in the directory
- users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
self.assertEqual(users, {u1, u2, u3})
# The next four tests (test_population_excludes_*) all set up
@@ -289,16 +282,12 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase):
self, normal_user: str, public_room: str, private_room: str
) -> None:
# After rebuilding the directory, we should only see the normal user.
- users = self.get_success(self.user_dir_helper.get_users_in_user_directory())
- self.assertEqual(users, {normal_user})
- in_public_rooms = self.get_success(
- self.user_dir_helper.get_users_in_public_rooms()
+ users, in_public, in_private = self.get_success(
+ self.user_dir_helper.get_tables()
)
- self.assertEqual(set(in_public_rooms), {(normal_user, public_room)})
- in_private_rooms = self.get_success(
- self.user_dir_helper.get_users_who_share_private_rooms()
- )
- self.assertEqual(in_private_rooms, [])
+ self.assertEqual(users, {normal_user})
+ self.assertEqual(in_public, {(normal_user, public_room)})
+ self.assertEqual(in_private, set())
def test_population_excludes_support_user(self) -> None:
# Create a normal and support user.
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index cf407c51cf..e2c506e5a4 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -24,6 +24,47 @@ from synapse.types import JsonDict, get_domain_from_id
class EventAuthTestCase(unittest.TestCase):
+ def test_rejected_auth_events(self):
+ """
+ Events that refer to rejected events in their auth events are rejected
+ """
+ creator = "@creator:example.com"
+ auth_events = [
+ _create_event(creator),
+ _join_event(creator),
+ ]
+
+ # creator should be able to send state
+ event_auth.check_auth_rules_for_event(
+ RoomVersions.V9,
+ _random_state_event(creator),
+ auth_events,
+ )
+
+ # ... but a rejected join_rules event should cause it to be rejected
+ rejected_join_rules = _join_rules_event(creator, "public")
+ rejected_join_rules.rejected_reason = "stinky"
+ auth_events.append(rejected_join_rules)
+
+ self.assertRaises(
+ AuthError,
+ event_auth.check_auth_rules_for_event,
+ RoomVersions.V9,
+ _random_state_event(creator),
+ auth_events,
+ )
+
+        # ... even if there is *also* a good join_rules event
+ auth_events.append(_join_rules_event(creator, "public"))
+
+ self.assertRaises(
+ AuthError,
+ event_auth.check_auth_rules_for_event,
+ RoomVersions.V9,
+ _random_state_event(creator),
+ auth_events,
+ )
+
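
The new test above exercises a guard that fails an event outright when any of its auth events was itself rejected. The diff does not show the implementation, but a hedged sketch of the kind of check being tested inside check_auth_rules_for_event would be:

    # Hedged sketch: a single rejected event among the auth events is
    # enough to reject the event under test, even if a non-rejected
    # duplicate of the same event is also present.
    for auth_event in auth_events:
        if auth_event.rejected_reason is not None:
            raise AuthError(
                403,
                "During auth for event %s: found rejected event %s in the state"
                % (event.event_id, auth_event.event_id),
            )
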
def test_random_users_cannot_send_state_before_first_pl(self):
"""
Check that, before the first PL lands, the creator is the only user
@@ -31,11 +72,11 @@ class EventAuthTestCase(unittest.TestCase):
"""
creator = "@creator:example.com"
joiner = "@joiner:example.com"
- auth_events = {
- ("m.room.create", ""): _create_event(creator),
- ("m.room.member", creator): _join_event(creator),
- ("m.room.member", joiner): _join_event(joiner),
- }
+ auth_events = [
+ _create_event(creator),
+ _join_event(creator),
+ _join_event(joiner),
+ ]
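
This dict-to-list change (and the auth_events.values() calls later in the file) reflects an assumed signature change: check_auth_rules_for_event now takes an iterable of events and derives the (type, state_key) map itself, rather than requiring callers to construct it. A hedged sketch of that shape:

    from typing import Iterable

    # Hedged sketch: the checker builds its own state map, which is why
    # tests can now pass a plain list (or dict_values) of events.
    def check_auth_rules_for_event(
        room_version_obj: RoomVersion,
        event: EventBase,
        auth_events: Iterable[EventBase],
    ) -> None:
        auth_dict = {(e.type, e.state_key): e for e in auth_events}
        ...  # the existing rule checks then operate on auth_dict
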
# creator should be able to send state
event_auth.check_auth_rules_for_event(
@@ -62,15 +103,15 @@ class EventAuthTestCase(unittest.TestCase):
pleb = "@joiner:example.com"
king = "@joiner2:example.com"
- auth_events = {
- ("m.room.create", ""): _create_event(creator),
- ("m.room.member", creator): _join_event(creator),
- ("m.room.power_levels", ""): _power_levels_event(
+ auth_events = [
+ _create_event(creator),
+ _join_event(creator),
+ _power_levels_event(
creator, {"state_default": "30", "users": {pleb: "29", king: "30"}}
),
- ("m.room.member", pleb): _join_event(pleb),
- ("m.room.member", king): _join_event(king),
- }
+ _join_event(pleb),
+ _join_event(king),
+ ]
# pleb should not be able to send state
self.assertRaises(
@@ -92,10 +133,10 @@ class EventAuthTestCase(unittest.TestCase):
"""Alias events have special behavior up through room version 6."""
creator = "@creator:example.com"
other = "@other:example.com"
- auth_events = {
- ("m.room.create", ""): _create_event(creator),
- ("m.room.member", creator): _join_event(creator),
- }
+ auth_events = [
+ _create_event(creator),
+ _join_event(creator),
+ ]
# creator should be able to send aliases
event_auth.check_auth_rules_for_event(
@@ -131,10 +172,10 @@ class EventAuthTestCase(unittest.TestCase):
"""After MSC2432, alias events have no special behavior."""
creator = "@creator:example.com"
other = "@other:example.com"
- auth_events = {
- ("m.room.create", ""): _create_event(creator),
- ("m.room.member", creator): _join_event(creator),
- }
+ auth_events = [
+ _create_event(creator),
+ _join_event(creator),
+ ]
# creator should be able to send aliases
event_auth.check_auth_rules_for_event(
@@ -170,14 +211,14 @@ class EventAuthTestCase(unittest.TestCase):
creator = "@creator:example.com"
pleb = "@joiner:example.com"
- auth_events = {
- ("m.room.create", ""): _create_event(creator),
- ("m.room.member", creator): _join_event(creator),
- ("m.room.power_levels", ""): _power_levels_event(
+ auth_events = [
+ _create_event(creator),
+ _join_event(creator),
+ _power_levels_event(
creator, {"state_default": "30", "users": {pleb: "30"}}
),
- ("m.room.member", pleb): _join_event(pleb),
- }
+ _join_event(pleb),
+ ]
# pleb should be able to modify the notifications power level.
event_auth.check_auth_rules_for_event(
@@ -211,7 +252,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user cannot be force-joined to a room.
@@ -219,7 +260,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
- auth_events,
+ auth_events.values(),
)
# Banned should be rejected.
@@ -228,7 +269,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user who left can re-join.
@@ -236,7 +277,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user can send a join if they're in the room.
@@ -244,7 +285,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user can accept an invite.
@@ -254,7 +295,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
def test_join_rules_invite(self):
@@ -275,7 +316,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user cannot be force-joined to a room.
@@ -283,7 +324,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_member_event(pleb, "join", sender=creator),
- auth_events,
+ auth_events.values(),
)
# Banned should be rejected.
@@ -292,7 +333,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user who left cannot re-join.
@@ -301,7 +342,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user can send a join if they're in the room.
@@ -309,7 +350,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user can accept an invite.
@@ -319,7 +360,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
def test_join_rules_msc3083_restricted(self):
@@ -347,7 +388,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V6,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A properly formatted join event should work.
@@ -360,7 +401,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V8,
authorised_join_event,
- auth_events,
+ auth_events.values(),
)
# A join issued by a specific user works (i.e. the power level checks
@@ -380,7 +421,7 @@ class EventAuthTestCase(unittest.TestCase):
EventContentFields.AUTHORISING_USER: "@inviter:foo.test"
},
),
- pl_auth_events,
+ pl_auth_events.values(),
)
# A join which is missing an authorised server is rejected.
@@ -388,7 +429,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A join authorised by a user who is not in the room is rejected.
@@ -405,7 +446,7 @@ class EventAuthTestCase(unittest.TestCase):
EventContentFields.AUTHORISING_USER: "@other:example.com"
},
),
- auth_events,
+ auth_events.values(),
)
# A user cannot be force-joined to a room. (This uses an event which
@@ -421,7 +462,7 @@ class EventAuthTestCase(unittest.TestCase):
EventContentFields.AUTHORISING_USER: "@inviter:foo.test"
},
),
- auth_events,
+ auth_events.values(),
)
# Banned should be rejected.
@@ -430,7 +471,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V8,
authorised_join_event,
- auth_events,
+ auth_events.values(),
)
# A user who left can re-join.
@@ -438,7 +479,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V8,
authorised_join_event,
- auth_events,
+ auth_events.values(),
)
# A user can send a join if they're in the room. (This doesn't need to
@@ -447,7 +488,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
# A user can accept an invite. (This doesn't need to be authorised since
@@ -458,7 +499,7 @@ class EventAuthTestCase(unittest.TestCase):
event_auth.check_auth_rules_for_event(
RoomVersions.V8,
_join_event(pleb),
- auth_events,
+ auth_events.values(),
)
@@ -473,6 +514,7 @@ def _create_event(user_id: str) -> EventBase:
"room_id": TEST_ROOM_ID,
"event_id": _get_event_id(),
"type": "m.room.create",
+ "state_key": "",
"sender": user_id,
"content": {"creator": user_id},
}
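
The new explicit empty state_key matters under the signature sketched earlier: once the checker indexes auth events by (type, state_key), the create event must carry one. A hedged reconstruction of the full helper after this hunk (the make_event_from_dict call is assumed from the surrounding test code, which this diff does not show):

    # Hedged reconstruction: m.room.create is a state event with an empty
    # state_key, so the helper now sets it explicitly.
    def _create_event(user_id: str) -> EventBase:
        return make_event_from_dict(
            {
                "room_id": TEST_ROOM_ID,
                "event_id": _get_event_id(),
                "type": "m.room.create",
                "state_key": "",
                "sender": user_id,
                "content": {"creator": user_id},
            }
        )
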