diff --git a/changelog.d/10250.bugfix b/changelog.d/10250.bugfix
new file mode 100644
index 0000000000..a8107dafb2
--- /dev/null
+++ b/changelog.d/10250.bugfix
@@ -0,0 +1 @@
+Add base starting insertion event when no chunk ID is specified in the historical batch send API.
diff --git a/changelog.d/10289.misc b/changelog.d/10289.misc
new file mode 100644
index 0000000000..2df30e7a7a
--- /dev/null
+++ b/changelog.d/10289.misc
@@ -0,0 +1 @@
+Convert `room_depth.min_depth` column to a `BIGINT`.
diff --git a/changelog.d/10305.misc b/changelog.d/10305.misc
new file mode 100644
index 0000000000..8488d47f6f
--- /dev/null
+++ b/changelog.d/10305.misc
@@ -0,0 +1 @@
+Additional unit tests for the spaces summary API.
diff --git a/changelog.d/10313.doc b/changelog.d/10313.doc
new file mode 100644
index 0000000000..44086e3d9d
--- /dev/null
+++ b/changelog.d/10313.doc
@@ -0,0 +1 @@
+Simplify structure of room admin API.
\ No newline at end of file
diff --git a/changelog.d/10316.misc b/changelog.d/10316.misc
new file mode 100644
index 0000000000..1fd0810fde
--- /dev/null
+++ b/changelog.d/10316.misc
@@ -0,0 +1 @@
+Rebuild event context and auth when processing specific results from `ThirdPartyEventRules` modules.
diff --git a/changelog.d/10317.bugfix b/changelog.d/10317.bugfix
new file mode 100644
index 0000000000..826c269eff
--- /dev/null
+++ b/changelog.d/10317.bugfix
@@ -0,0 +1 @@
+Fix purging rooms that other homeservers are still sending events for. Contributed by @ilmari.
diff --git a/changelog.d/10322.doc b/changelog.d/10322.doc
new file mode 100644
index 0000000000..db604cf2aa
--- /dev/null
+++ b/changelog.d/10322.doc
@@ -0,0 +1 @@
+Fix a broken link in the admin API docs.
diff --git a/changelog.d/10324.misc b/changelog.d/10324.misc
new file mode 100644
index 0000000000..3c3ee6d6fc
--- /dev/null
+++ b/changelog.d/10324.misc
@@ -0,0 +1 @@
+Minor change to the code that populates `user_daily_visits`.
diff --git a/changelog.d/10337.doc b/changelog.d/10337.doc
new file mode 100644
index 0000000000..f305bdb3ba
--- /dev/null
+++ b/changelog.d/10337.doc
@@ -0,0 +1 @@
+Fix formatting in the logcontext documentation.
diff --git a/changelog.d/10343.bugfix b/changelog.d/10343.bugfix
new file mode 100644
index 0000000000..53ccf79a81
--- /dev/null
+++ b/changelog.d/10343.bugfix
@@ -0,0 +1 @@
+Fix errors during backfill caused by previously purged redaction events. Contributed by Andreas Rammhold (@andir).
diff --git a/changelog.d/10344.bugfix b/changelog.d/10344.bugfix
new file mode 100644
index 0000000000..ab6eb4999f
--- /dev/null
+++ b/changelog.d/10344.bugfix
@@ -0,0 +1 @@
+Fix the user directory becoming broken (and noisy errors being logged) when knocking and room statistics are in use.
diff --git a/changelog.d/10345.misc b/changelog.d/10345.misc
new file mode 100644
index 0000000000..7424486e87
--- /dev/null
+++ b/changelog.d/10345.misc
@@ -0,0 +1 @@
+Re-enable Sytests that were disabled for the 1.37.1 release.
diff --git a/changelog.d/10347.misc b/changelog.d/10347.misc
new file mode 100644
index 0000000000..b2275a1350
--- /dev/null
+++ b/changelog.d/10347.misc
@@ -0,0 +1 @@
+Run `pyupgrade` on the codebase.
\ No newline at end of file
diff --git a/changelog.d/10349.misc b/changelog.d/10349.misc
new file mode 100644
index 0000000000..5b014e7416
--- /dev/null
+++ b/changelog.d/10349.misc
@@ -0,0 +1 @@
+Switch `application_services_txns.txn_id` database column to `BIGINT`.
diff --git a/changelog.d/10350.misc b/changelog.d/10350.misc
new file mode 100644
index 0000000000..eed2d8552a
--- /dev/null
+++ b/changelog.d/10350.misc
@@ -0,0 +1 @@
+Convert internal type variable syntax to reflect wider ecosystem use.
\ No newline at end of file
diff --git a/changelog.d/10355.bugfix b/changelog.d/10355.bugfix
new file mode 100644
index 0000000000..92df612011
--- /dev/null
+++ b/changelog.d/10355.bugfix
@@ -0,0 +1 @@
+Fix newly added `synapse_federation_server_oldest_inbound_pdu_in_staging` prometheus metric to measure age rather than timestamp.
diff --git a/changelog.d/10357.misc b/changelog.d/10357.misc
new file mode 100644
index 0000000000..7424486e87
--- /dev/null
+++ b/changelog.d/10357.misc
@@ -0,0 +1 @@
+Re-enable Sytests that were disabled for the 1.37.1 release.
diff --git a/changelog.d/10367.bugfix b/changelog.d/10367.bugfix
new file mode 100644
index 0000000000..b445556084
--- /dev/null
+++ b/changelog.d/10367.bugfix
@@ -0,0 +1 @@
+Fix a bug where `make_room_admin` failed for users that have left a private room.
\ No newline at end of file
diff --git a/changelog.d/10370.doc b/changelog.d/10370.doc
new file mode 100644
index 0000000000..8c59d98ee8
--- /dev/null
+++ b/changelog.d/10370.doc
@@ -0,0 +1 @@
+Fix some links in `docs` and `contrib`.
\ No newline at end of file
diff --git a/changelog.d/9721.removal b/changelog.d/9721.removal
new file mode 100644
index 0000000000..da2ba48c84
--- /dev/null
+++ b/changelog.d/9721.removal
@@ -0,0 +1 @@
+Remove functionality associated with the unused `room_stats_historical` and `user_stats_historical` tables. Contributed by @xmunoz.
diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index d1ecd453db..26d640c448 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -56,7 +56,7 @@ services:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
- # https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
+ # https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..
diff --git a/contrib/grafana/README.md b/contrib/grafana/README.md
index 4608793394..0d4e1b59b2 100644
--- a/contrib/grafana/README.md
+++ b/contrib/grafana/README.md
@@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
-1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
+1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
-3. Set up required recording rules. https://github.com/matrix-org/synapse/tree/master/contrib/prometheus
+3. Set up required recording rules. [contrib/prometheus](../prometheus)
diff --git a/contrib/prometheus/README.md b/contrib/prometheus/README.md
index b3f23bcc80..4dbf648df8 100644
--- a/contrib/prometheus/README.md
+++ b/contrib/prometheus/README.md
@@ -34,7 +34,7 @@ Add a new job to the main prometheus.yml file:
```
An example of a Prometheus configuration with workers can be found in
-[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
+[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
To use `synapse.rules` add
diff --git a/contrib/purge_api/README.md b/contrib/purge_api/README.md
index 06b4cdb9f7..2f2e5c58cd 100644
--- a/contrib/purge_api/README.md
+++ b/contrib/purge_api/README.md
@@ -3,8 +3,9 @@ Purge history API examples
# `purge_history.sh`
-A bash file, that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
-purge all messages in a list of rooms up to a certain event. You can select a
+A bash file that uses the
+[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
+to purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
@@ -12,5 +13,6 @@ the script.
# `purge_remote_media.sh`
-A bash file, that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
-purge all old cached remote media.
+A bash file that uses the
+[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
+to purge all old cached remote media.
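The same purge call the scripts make can also be issued directly from Python. A minimal sketch, using the `/_synapse/admin/v1/purge_history/<room_id>` endpoint documented at the link above; the homeserver URL, token, room ID, and event ID are placeholders:

```python
import requests  # any HTTP client works; requests keeps the sketch short

# Placeholder values - substitute your own.
HOMESERVER = "https://matrix.example.com"
ADMIN_TOKEN = "syt_admin_token"
ROOM_ID = "!someroom:example.com"
LAST_EVENT_ID = "$someevent"

# Purge history in the room up to LAST_EVENT_ID, keeping local events.
resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/purge_history/{ROOM_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"purge_up_to_event_id": LAST_EVENT_ID, "delete_local_events": False},
)
resp.raise_for_status()
purge_id = resp.json()["purge_id"]
# Progress can then be polled at /_synapse/admin/v1/purge_history_status/<purge_id>.
```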
diff --git a/contrib/purge_api/purge_history.sh b/contrib/purge_api/purge_history.sh
index c45136ff53..9d5324ea1c 100644
--- a/contrib/purge_api/purge_history.sh
+++ b/contrib/purge_api/purge_history.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# this script will use the api:
-# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
+# https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
#
# It will purge all messages in a list of rooms up to a certain event
diff --git a/contrib/systemd-with-workers/README.md b/contrib/systemd-with-workers/README.md
index 8d21d532bd..9b19b042e9 100644
--- a/contrib/systemd-with-workers/README.md
+++ b/contrib/systemd-with-workers/README.md
@@ -1,2 +1,3 @@
The documentation for using systemd to manage synapse workers is now part of
-the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers).
+the main synapse distribution. See
+[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index b033fc03ef..61bed1e0d5 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -47,7 +47,7 @@ The API returns a JSON body like the following:
## List all media uploaded by a user
Listing all media that has been uploaded by a local user can be achieved through
-the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
+the use of the [List media of a user](user_admin_api.md#list-media-of-a-user)
Admin API.
# Quarantine media
@@ -257,7 +257,7 @@ URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
Files that were last used before this timestamp will be deleted. It is the timestamp of
-last access and not the timestamp creation.
+last access and not the timestamp of creation.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files
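As a usage sketch of the delete-by-date-or-size call these parameters belong to (`POST /_synapse/admin/v1/media/<server_name>/delete`); the homeserver URL, token, and thresholds below are placeholders:

```python
import time

import requests

HOMESERVER = "https://matrix.example.com"  # placeholder values throughout
ADMIN_TOKEN = "syt_admin_token"
SERVER_NAME = "example.com"

# Delete local media last accessed more than 30 days ago and larger than
# 1 MiB, keeping avatar/profile images.
thirty_days_ms = 30 * 24 * 3600 * 1000
resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/media/{SERVER_NAME}/delete",
    params={
        "before_ts": int(time.time() * 1000) - thirty_days_ms,
        "size_gt": 1024 * 1024,
        "keep_profiles": "true",
    },
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
print(resp.json())  # {"deleted_media": [...], "total": ...}
```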
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index bb7828a525..48777dd231 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -1,13 +1,9 @@
# Contents
- [List Room API](#list-room-api)
- * [Parameters](#parameters)
- * [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
- * [Parameters](#parameters-1)
- * [Response](#response)
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
@@ -19,7 +15,7 @@ The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
-## Parameters
+**Parameters**
The following query parameters are available:
@@ -46,6 +42,8 @@ The following query parameters are available:
* `search_term` - Filter rooms by their room name. Search term can be contained in any
part of the room name. Defaults to no filtering.
+**Response**
+
The following fields are possible in the JSON response body:
* `rooms` - An array of objects, each containing information about a room.
@@ -79,17 +77,15 @@ The following fields are possible in the JSON response body:
Use `prev_batch` for the `from` value in the next request to
get the "previous page" of results.
-## Usage
+The API is:
A standard request with no filtering:
```
GET /_synapse/admin/v1/rooms
-
-{}
```
-Response:
+A response body like the following is returned:
```jsonc
{
@@ -137,11 +133,9 @@ Filtering by room name:
```
GET /_synapse/admin/v1/rooms?search_term=TWIM
-
-{}
```
-Response:
+A response body like the following is returned:
```json
{
@@ -172,11 +166,9 @@ Paginating through a list of rooms:
```
GET /_synapse/admin/v1/rooms?order_by=size
-
-{}
```
-Response:
+A response body like the following is returned:
```jsonc
{
@@ -228,11 +220,9 @@ parameter to the value of `next_token`.
```
GET /_synapse/admin/v1/rooms?order_by=size&from=100
-
-{}
```
-Response:
+A response body like the following is returned:
```jsonc
{
@@ -304,17 +294,13 @@ The following fields are possible in the JSON response body:
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.
-## Usage
-
-A standard request:
+The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>
-
-{}
```
-Response:
+A response body like the following is returned:
```json
{
@@ -347,17 +333,13 @@ The response includes the following fields:
* `members` - A list of all the members that are present in the room, represented by their ids.
* `total` - Total number of members in the room.
-## Usage
-
-A standard request:
+The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/members
-
-{}
```
-Response:
+A response body like the following is returned:
```json
{
@@ -378,17 +360,13 @@ The response includes the following fields:
* `state` - The current state of the room at the time of request.
-## Usage
-
-A standard request:
+The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/state
-
-{}
```
-Response:
+A response body like the following is returned:
```json
{
@@ -432,6 +410,7 @@ DELETE /_synapse/admin/v1/rooms/<room_id>
```
with a body of:
+
```json
{
"new_room_user_id": "@someuser:example.com",
@@ -461,7 +440,7 @@ A response body like the following is returned:
}
```
-## Parameters
+**Parameters**
The following parameters should be set in the URL:
@@ -491,7 +470,7 @@ The following JSON body parameters are available:
The JSON body must not be empty. The body must be at least `{}`.
-## Response
+**Response**
The following fields are returned in the JSON response body:
@@ -548,10 +527,10 @@ By default the server admin (the caller) is granted power, but another user can
optionally be specified, e.g.:
```
- POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
- {
- "user_id": "@foo:example.com"
- }
+POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
+{
+ "user_id": "@foo:example.com"
+}
```
# Forward Extremities Admin API
@@ -565,7 +544,7 @@ extremities accumulate in a room, performance can become degraded. For details,
To check the status of forward extremities for a room:
```
- GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
+GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```
A response as follows will be returned:
@@ -581,7 +560,7 @@ A response as follows will be returned:
"received_ts": 1611263016761
}
]
-}
+}
```
## Deleting forward extremities
@@ -594,7 +573,7 @@ If a room has lots of forward extremities, the extra can be
deleted as follows:
```
- DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
+DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```
A response as follows will be returned, indicating the amount of forward extremities
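The `from`/`next_batch` pagination used throughout the restructured examples can be driven from a small client loop. A sketch with a placeholder homeserver and token; only the endpoint, `order_by`, `from`, and `next_batch` come from the docs above:

```python
from typing import Iterator

import requests

HOMESERVER = "https://matrix.example.com"  # placeholder values
ADMIN_TOKEN = "syt_admin_token"

def iter_rooms(order_by: str = "size") -> Iterator[dict]:
    """Yield every room, following next_batch until it disappears."""
    params = {"order_by": order_by}
    while True:
        resp = requests.get(
            f"{HOMESERVER}/_synapse/admin/v1/rooms",
            params=params,
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        )
        resp.raise_for_status()
        body = resp.json()
        yield from body["rooms"]
        if "next_batch" not in body:
            break  # last page: no next_batch token
        params["from"] = body["next_batch"]

for room in iter_rooms():
    print(room["room_id"], room.get("name"))
```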
diff --git a/docs/log_contexts.md b/docs/log_contexts.md
index fe30ca2791..9a43d46091 100644
--- a/docs/log_contexts.md
+++ b/docs/log_contexts.md
@@ -17,7 +17,7 @@ class).
Deferreds make the whole thing complicated, so this document describes
how it all works, and how to write code which follows the rules.
-##Logcontexts without Deferreds
+## Logcontexts without Deferreds
In the absence of any Deferred voodoo, things are simple enough. As with
any code of this nature, the rule is that our function should leave
diff --git a/docs/room_and_user_statistics.md b/docs/room_and_user_statistics.md
index e1facb38d4..cc38c890bb 100644
--- a/docs/room_and_user_statistics.md
+++ b/docs/room_and_user_statistics.md
@@ -1,9 +1,9 @@
Room and User Statistics
========================
-Synapse maintains room and user statistics (as well as a cache of room state),
-in various tables. These can be used for administrative purposes but are also
-used when generating the public room directory.
+Synapse maintains room and user statistics in various tables. These can be used
+for administrative purposes but are also used when generating the public room
+directory.
# Synapse Developer Documentation
@@ -15,48 +15,8 @@ used when generating the public room directory.
* **subject**: Something we are tracking stats about – currently a room or user.
* **current row**: An entry for a subject in the appropriate current statistics
table. Each subject can have only one.
-* **historical row**: An entry for a subject in the appropriate historical
- statistics table. Each subject can have any number of these.
### Overview
-Stats are maintained as time series. There are two kinds of column:
-
-* absolute columns – where the value is correct for the time given by `end_ts`
-  in the stats row. (Imagine a line graph for these values)
-  * They can also be thought of as 'gauges' in Prometheus, if you are familiar.
-* per-slice columns – where the value corresponds to how many of the occurrences
-  occurred within the time slice given by `(end_ts − bucket_size)…end_ts`
-  or `start_ts…end_ts`. (Imagine a histogram for these values)
-
-Stats are maintained in two tables (for each type): current and historical.
-
-Current stats correspond to the present values. Each subject can only have one
-entry.
-
-Historical stats correspond to values in the past. Subjects may have multiple
-entries.
-
-## Concepts around the management of stats
-
-### Current rows
-
-Current rows contain the most up-to-date statistics for a room.
-They only contain absolute columns
-
-### Historical rows
-
-Historical rows can always be considered to be valid for the time slice and
-end time specified.
-
-* historical rows will not exist for every time slice – they will be omitted
- if there were no changes. In this case, the following assumptions can be
- made to interpolate/recreate missing rows:
- - absolute fields have the same values as in the preceding row
- - per-slice fields are zero (`0`)
-* historical rows will not be retained forever – rows older than a configurable
- time will be purged.
-
-#### Purge
-
-The purging of historical rows is not yet implemented.
+Stats correspond to the present values. Current rows contain the most up-to-date
+statistics for a room. Each subject can only have one entry.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 054770f71f..a45732a246 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -2653,11 +2653,6 @@ stats:
#
#enabled: false
- # The size of each timeslice in the room_stats_historical and
- # user_stats_historical tables, as a time period. Defaults to "1d".
- #
- #bucket_size: 1h
-
# Server Notices room configuration
#
diff --git a/docs/systemd-with-workers/README.md b/docs/systemd-with-workers/README.md
index a7de2de88a..3237ba4e93 100644
--- a/docs/systemd-with-workers/README.md
+++ b/docs/systemd-with-workers/README.md
@@ -15,9 +15,11 @@ contains an example configuration for the `federation_reader` worker.
## Synapse configuration files
See [workers.md](../workers.md) for information on how to set up the
-configuration files and reverse-proxy correctly. You can find an example worker
-config in the [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/)
-folder.
+configuration files and reverse-proxy correctly.
+Below is a sample `federation_reader` worker configuration file.
+```yaml
+{{#include workers/federation_reader.yaml}}
+```
Systemd manages daemonization itself, so ensure that none of the configuration
files set either `daemonize` or `worker_daemonize`.
@@ -72,12 +74,12 @@ systemctl restart matrix-synapse.target
**Optional:** If further hardening is desired, the file
`override-hardened.conf` may be copied from
-`contrib/systemd/override-hardened.conf` in this repository to the location
+[contrib/systemd/override-hardened.conf](https://github.com/matrix-org/synapse/tree/develop/contrib/systemd/)
+in this repository to the location
`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
directory may have to be created). It enables certain sandboxing features in
systemd to further secure the synapse service. You may read the comments to
-understand what the override file is doing. The same file will need to be copied
-to
+understand what the override file is doing. The same file will need to be copied to
`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
(this directory may also have to be created) in order to apply the same
hardening options to any worker processes.
diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md
index 4c4bc6fc16..a673d487b8 100644
--- a/docs/usage/configuration/logging_sample_config.md
+++ b/docs/usage/configuration/logging_sample_config.md
@@ -11,4 +11,4 @@ a fresh config using Synapse by following the instructions in
```yaml
{{#include ../../sample_log_config.yaml}}
-``__`
\ No newline at end of file
+```
\ No newline at end of file
diff --git a/synapse/config/stats.py b/synapse/config/stats.py
index 78f61fe9da..6f253e00c0 100644
--- a/synapse/config/stats.py
+++ b/synapse/config/stats.py
@@ -38,13 +38,9 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
- self.stats_bucket_size = 86400 * 1000
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
- self.stats_bucket_size = self.parse_duration(
- stats_config.get("bucket_size", "1d")
- )
if not self.stats_enabled:
logger.warning(ROOM_STATS_DISABLED_WARN)
@@ -59,9 +55,4 @@ class StatsConfig(Config):
# correctly.
#
#enabled: false
-
- # The size of each timeslice in the room_stats_historical and
- # user_stats_historical tables, as a time period. Defaults to "1d".
- #
- #bucket_size: 1h
"""
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 66e40a915d..e06655f3d4 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -518,6 +518,9 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
+ historical: Indicates whether the message is being inserted
+ back in time around some existing events. This is used to skip
+ a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -772,6 +775,7 @@ class EventCreationHandler:
txn_id: Optional[str] = None,
ignore_shadow_ban: bool = False,
outlier: bool = False,
+ historical: bool = False,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
"""
@@ -799,6 +803,9 @@ class EventCreationHandler:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
+ historical: Indicates whether the message is being inserted
+ back in time around some existing events. This is used to skip
+ a few checks and mark the event as backfilled.
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
@@ -847,6 +854,7 @@ class EventCreationHandler:
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
outlier=outlier,
+ historical=historical,
depth=depth,
)
@@ -1594,11 +1602,13 @@ class EventCreationHandler:
for k, v in original_event.internal_metadata.get_dict().items():
setattr(builder.internal_metadata, k, v)
- # the event type hasn't changed, so there's no point in re-calculating the
- # auth events.
+ # modules can send new state events, so we re-calculate the auth events just in
+ # case.
+ prev_event_ids = await self.store.get_prev_events_for_room(builder.room_id)
+
event = await builder.build(
- prev_event_ids=original_event.prev_event_ids(),
- auth_event_ids=original_event.auth_event_ids(),
+ prev_event_ids=prev_event_ids,
+ auth_event_ids=None,
)
# we rebuild the event context, to be on the safe side. If nothing else,
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index f782d9db32..0059ad0f56 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -30,6 +30,8 @@ class ReceiptsHandler(BaseHandler):
self.server_name = hs.config.server_name
self.store = hs.get_datastore()
+ self.event_auth_handler = hs.get_event_auth_handler()
+
self.hs = hs
# We only need to poke the federation sender explicitly if its on the
@@ -59,6 +61,19 @@ class ReceiptsHandler(BaseHandler):
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
for room_id, room_values in content.items():
+ # If we're not in the room just ditch the event entirely. This is
+ # probably an old server that has come back and thinks we're still in
+ # the room (or we've been rejoined to the room by a state reset).
+ is_in_room = await self.event_auth_handler.check_host_in_room(
+ room_id, self.server_name
+ )
+ if not is_in_room:
+ logger.info(
+ "Ignoring receipt from %s as we're not in the room",
+ origin,
+ )
+ continue
+
for receipt_type, users in room_values.items():
for user_id, user_values in users.items():
if get_domain_from_id(user_id) != origin:
diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py
index 4e45d1da57..814d08efcb 100644
--- a/synapse/handlers/stats.py
+++ b/synapse/handlers/stats.py
@@ -45,7 +45,6 @@ class StatsHandler:
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
- self.stats_bucket_size = hs.config.stats_bucket_size
self.stats_enabled = hs.config.stats_enabled
@@ -106,20 +105,6 @@ class StatsHandler:
room_deltas = {}
user_deltas = {}
- # Then count deltas for total_events and total_event_bytes.
- (
- room_count,
- user_count,
- ) = await self.store.get_changes_room_total_events_and_bytes(
- self.pos, max_pos
- )
-
- for room_id, fields in room_count.items():
- room_deltas.setdefault(room_id, Counter()).update(fields)
-
- for user_id, fields in user_count.items():
- user_deltas.setdefault(user_id, Counter()).update(fields)
-
logger.debug("room_deltas: %s", room_deltas)
logger.debug("user_deltas: %s", user_deltas)
@@ -181,12 +166,10 @@ class StatsHandler:
event_content = {} # type: JsonDict
- sender = None
if event_id is not None:
event = await self.store.get_event(event_id, allow_none=True)
if event:
event_content = event.content or {}
- sender = event.sender
# All the values in this dict are deltas (RELATIVE changes)
room_stats_delta = room_to_stats_deltas.setdefault(room_id, Counter())
@@ -244,12 +227,6 @@ class StatsHandler:
room_stats_delta["joined_members"] += 1
elif membership == Membership.INVITE:
room_stats_delta["invited_members"] += 1
-
- if sender and self.is_mine_id(sender):
- user_to_stats_deltas.setdefault(sender, Counter())[
- "invites_sent"
- ] += 1
-
elif membership == Membership.LEAVE:
room_stats_delta["left_members"] += 1
elif membership == Membership.BAN:
@@ -279,10 +256,6 @@ class StatsHandler:
room_state["is_federatable"] = (
event_content.get("m.federate", True) is True
)
- if sender and self.is_mine_id(sender):
- user_to_stats_deltas.setdefault(sender, Counter())[
- "rooms_created"
- ] += 1
elif typ == EventTypes.JoinRules:
room_state["join_rules"] = event_content.get("join_rule")
elif typ == EventTypes.RoomHistoryVisibility:
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index e22393adc4..c0a8364755 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -208,6 +208,7 @@ class TypingWriterHandler(FollowerTypingHandler):
self.auth = hs.get_auth()
self.notifier = hs.get_notifier()
+ self.event_auth_handler = hs.get_event_auth_handler()
self.hs = hs
@@ -326,6 +327,19 @@ class TypingWriterHandler(FollowerTypingHandler):
room_id = content["room_id"]
user_id = content["user_id"]
+ # If we're not in the room just ditch the event entirely. This is
+ # probably an old server that has come back and thinks we're still in
+ # the room (or we've been rejoined to the room by a state reset).
+ is_in_room = await self.event_auth_handler.check_host_in_room(
+ room_id, self.server_name
+ )
+ if not is_in_room:
+ logger.info(
+ "Ignoring typing update from %s as we're not in the room",
+ origin,
+ )
+ return
+
member = RoomMember(user_id=user_id, room_id=room_id)
# Check that the string is a valid user id
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
index f0cddd2d2c..3c51a742bf 100644
--- a/synapse/rest/admin/rooms.py
+++ b/synapse/rest/admin/rooms.py
@@ -462,6 +462,7 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet):
super().__init__(hs)
self.hs = hs
self.auth = hs.get_auth()
+ self.store = hs.get_datastore()
self.event_creation_handler = hs.get_event_creation_handler()
self.state_handler = hs.get_state_handler()
self.is_mine_id = hs.is_mine_id
@@ -500,7 +501,13 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet):
admin_user_id = None
for admin_user in reversed(admin_users):
- if room_state.get((EventTypes.Member, admin_user)):
+ (
+ current_membership_type,
+ _,
+ ) = await self.store.get_local_current_membership_for_user_in_room(
+ admin_user, room_id
+ )
+ if current_membership_type == "join":
admin_user_id = admin_user
break
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 92ebe838fd..9c58e3689e 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -349,6 +349,35 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
return depth
+ def _create_insertion_event_dict(
+ self, sender: str, room_id: str, origin_server_ts: int
+ ):
+ """Creates an event dict for an "insertion" event with the proper fields
+ and a random chunk ID.
+
+ Args:
+ sender: The event author MXID
+ room_id: The room ID that the event belongs to
+ origin_server_ts: Timestamp when the event was sent
+
+ Returns:
+            The event dictionary for the "insertion" event
+ """
+
+ next_chunk_id = random_string(8)
+ insertion_event = {
+ "type": EventTypes.MSC2716_INSERTION,
+ "sender": sender,
+ "room_id": room_id,
+ "content": {
+ EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
+ EventContentFields.MSC2716_HISTORICAL: True,
+ },
+ "origin_server_ts": origin_server_ts,
+ }
+
+ return insertion_event
+
async def on_POST(self, request, room_id):
requester = await self.auth.get_user_by_req(request, allow_guest=False)
@@ -449,37 +478,68 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
events_to_create = body["events"]
- # If provided, connect the chunk to the last insertion point
- # The chunk ID passed in comes from the chunk_id in the
- # "insertion" event from the previous chunk.
+ prev_event_ids = prev_events_from_query
+ inherited_depth = await self.inherit_depth_from_prev_ids(prev_events_from_query)
+
+ # Figure out which chunk to connect to. If they passed in
+ # chunk_id_from_query let's use it. The chunk ID passed in comes
+ # from the chunk_id in the "insertion" event from the previous chunk.
+ last_event_in_chunk = events_to_create[-1]
+ chunk_id_to_connect_to = chunk_id_from_query
+ base_insertion_event = None
if chunk_id_from_query:
- last_event_in_chunk = events_to_create[-1]
- last_event_in_chunk["content"][
- EventContentFields.MSC2716_CHUNK_ID
- ] = chunk_id_from_query
+ # TODO: Verify the chunk_id_from_query corresponds to an insertion event
+ pass
+ # Otherwise, create an insertion event to act as a starting point.
+ #
+ # We don't always have an insertion event to start hanging more history
+ # off of (ideally there would be one in the main DAG, but that's not the
+ # case if we're wanting to add history to e.g. existing rooms without
+ # an insertion event), in which case we just create a new insertion event
+ # that can then get pointed to by a "marker" event later.
+ else:
+ base_insertion_event_dict = self._create_insertion_event_dict(
+ sender=requester.user.to_string(),
+ room_id=room_id,
+ origin_server_ts=last_event_in_chunk["origin_server_ts"],
+ )
+ base_insertion_event_dict["prev_events"] = prev_event_ids.copy()
- # Add an "insertion" event to the start of each chunk (next to the oldest
+ (
+ base_insertion_event,
+ _,
+ ) = await self.event_creation_handler.create_and_send_nonmember_event(
+ requester,
+ base_insertion_event_dict,
+ prev_event_ids=base_insertion_event_dict.get("prev_events"),
+ auth_event_ids=auth_event_ids,
+ historical=True,
+ depth=inherited_depth,
+ )
+
+ chunk_id_to_connect_to = base_insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_CHUNK_ID
+ ]
+
+ # Connect this current chunk to the insertion event from the previous chunk
+ last_event_in_chunk["content"][
+ EventContentFields.MSC2716_CHUNK_ID
+ ] = chunk_id_to_connect_to
+
+ # Add an "insertion" event to the start of each chunk (next to the oldest-in-time
# event in the chunk) so the next chunk can be connected to this one.
- next_chunk_id = random_string(64)
- insertion_event = {
- "type": EventTypes.MSC2716_INSERTION,
- "sender": requester.user.to_string(),
- "content": {
- EventContentFields.MSC2716_NEXT_CHUNK_ID: next_chunk_id,
- EventContentFields.MSC2716_HISTORICAL: True,
- },
+ insertion_event = self._create_insertion_event_dict(
+ sender=requester.user.to_string(),
+ room_id=room_id,
# Since the insertion event is put at the start of the chunk,
- # where the oldest event is, copy the origin_server_ts from
+ # where the oldest-in-time event is, copy the origin_server_ts from
# the first event we're inserting
- "origin_server_ts": events_to_create[0]["origin_server_ts"],
- }
+ origin_server_ts=events_to_create[0]["origin_server_ts"],
+ )
# Prepend the insertion event to the start of the chunk
events_to_create = [insertion_event] + events_to_create
- inherited_depth = await self.inherit_depth_from_prev_ids(prev_events_from_query)
-
event_ids = []
- prev_event_ids = prev_events_from_query
events_to_persist = []
for ev in events_to_create:
assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
@@ -533,10 +593,16 @@ class RoomBatchSendEventRestServlet(TransactionRestServlet):
context=context,
)
+ # Add the base_insertion_event to the bottom of the list we return
+ if base_insertion_event is not None:
+ event_ids.append(base_insertion_event.event_id)
+
return 200, {
"state_events": auth_event_ids,
"events": event_ids,
- "next_chunk_id": next_chunk_id,
+ "next_chunk_id": insertion_event["content"][
+ EventContentFields.MSC2716_NEXT_CHUNK_ID
+ ],
}
def on_GET(self, request, room_id):
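The chunk linking implemented above is driven from the client side: the first request omits `chunk_id` (so the server now creates a base insertion event, per the changelog entry for this change), and each response's `next_chunk_id` is echoed back as `chunk_id` on the next request. A rough sketch, assuming the MSC2716 endpoint shape at the time of this change; all URLs, tokens, and events are placeholders:

```python
import requests

HOMESERVER = "https://matrix.example.com"  # placeholder values throughout
AS_TOKEN = "application_service_token"
ROOM_ID = "!someroom:example.com"
PREV_EVENT_ID = "$event_to_attach_history_to"

chunks = [
    # newest chunk first; events within a chunk run oldest-to-newest
    [{"type": "m.room.message", "sender": "@alice:example.com",
      "origin_server_ts": 1625000000000,
      "content": {"msgtype": "m.text", "body": "hello from the past"}}],
]

chunk_id = None  # omitted on the first request: the server creates a
                 # base insertion event for us (the fix in this change)
for events in chunks:
    params = {"prev_event": PREV_EVENT_ID}
    if chunk_id is not None:
        params["chunk_id"] = chunk_id
    resp = requests.post(
        f"{HOMESERVER}/_matrix/client/unstable/org.matrix.msc2716"
        f"/rooms/{ROOM_ID}/batch_send",
        params=params,
        headers={"Authorization": f"Bearer {AS_TOKEN}"},
        json={"events": events, "state_events_at_start": []},
    )
    resp.raise_for_status()
    # next_chunk_id comes from the insertion event prepended to this chunk;
    # the next (older) chunk connects to it via ?chunk_id=...
    chunk_id = resp.json()["next_chunk_id"]
```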
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index c4474df975..4e06938849 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -1230,7 +1230,9 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
"SELECT coalesce(min(received_ts), 0) FROM federation_inbound_events_staging"
)
- (age,) = txn.fetchone()
+        (received_ts,) = txn.fetchone()
+
+        # If the staging area is empty, `received_ts` is 0; report an age of 0
+        # rather than the time since the epoch.
+        age = 0
+        if received_ts:
+            age = self._clock.time_msec() - received_ts
return count, age
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 897fa06639..08c580b0dc 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1580,11 +1580,11 @@ class PersistEventsStore:
# invalidate the cache for the redacted event
txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
- self.db_pool.simple_insert_txn(
+ self.db_pool.simple_upsert_txn(
txn,
table="redactions",
+ keyvalues={"event_id": event.event_id},
values={
- "event_id": event.event_id,
"redacts": event.redacts,
"received_ts": self._clock.time_msec(),
},
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index c3f551d377..e3a544d9b2 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -320,7 +320,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
"""
Returns millisecond unixtime for start of UTC day.
"""
- now = time.gmtime()
+ now = time.gmtime(self._clock.time())
today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))
return today_start * 1000
@@ -352,7 +352,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
) udv
ON u.user_id = udv.user_id AND u.device_id=udv.device_id
INNER JOIN users ON users.name=u.user_id
- WHERE last_seen > ? AND last_seen <= ?
+ WHERE ? <= last_seen AND last_seen < ?
AND udv.timestamp IS NULL AND users.is_guest=0
AND users.appservice_id IS NULL
GROUP BY u.user_id, u.device_id
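The `time.gmtime(self._clock.time())` change makes the day-start calculation follow the injectable clock rather than the real wall clock. The arithmetic itself, as a standalone sketch with sanity checks:

```python
import calendar
import time

def start_of_utc_day_ms(now_s: float) -> int:
    """Millisecond unixtime for the start of the UTC day containing now_s.

    Taking the timestamp as an argument (instead of calling time.gmtime()
    with no argument) is what lets a mockable clock drive the result.
    """
    now = time.gmtime(now_s)
    today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))
    return today_start * 1000

assert start_of_utc_day_ms(0) == 0
assert start_of_utc_day_ms(86_399) == 0           # one second before midnight UTC
assert start_of_utc_day_ms(86_400) == 86_400_000  # midnight, next day
```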
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 7fb7780d0f..eb4841830d 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -215,6 +215,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"event_relations",
"event_search",
"rejections",
+ "redactions",
):
logger.info("[purge] removing events from %s", table)
@@ -392,7 +393,6 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"room_memberships",
"room_stats_state",
"room_stats_current",
- "room_stats_historical",
"room_stats_earliest_token",
"rooms",
"stream_ordering_to_exterm",
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 9f0d64a325..6ddafe5434 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -25,6 +25,7 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.search import SearchStore
+from synapse.storage.types import Cursor
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
@@ -1022,10 +1023,22 @@ class RoomWorkerStore(SQLBaseStore):
)
-class RoomBackgroundUpdateStore(SQLBaseStore):
+class _BackgroundUpdates:
REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory"
ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
+ POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2"
+ REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth"
+
+
+_REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
+ "DROP TRIGGER populate_min_depth2_trigger ON room_depth",
+ "DROP FUNCTION populate_min_depth2()",
+ "ALTER TABLE room_depth DROP COLUMN min_depth",
+ "ALTER TABLE room_depth RENAME COLUMN min_depth2 TO min_depth",
+)
+
+
+class RoomBackgroundUpdateStore(SQLBaseStore):
def __init__(self, database: DatabasePool, db_conn, hs):
super().__init__(database, db_conn, hs)
@@ -1037,15 +1050,25 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
)
self.db_pool.updates.register_background_update_handler(
- self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE,
+ _BackgroundUpdates.REMOVE_TOMESTONED_ROOMS_BG_UPDATE,
self._remove_tombstoned_rooms_from_directory,
)
self.db_pool.updates.register_background_update_handler(
- self.ADD_ROOMS_ROOM_VERSION_COLUMN,
+ _BackgroundUpdates.ADD_ROOMS_ROOM_VERSION_COLUMN,
self._background_add_rooms_room_version_column,
)
+ # BG updates to change the type of room_depth.min_depth
+ self.db_pool.updates.register_background_update_handler(
+ _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2,
+ self._background_populate_room_depth_min_depth2,
+ )
+ self.db_pool.updates.register_background_update_handler(
+ _BackgroundUpdates.REPLACE_ROOM_DEPTH_MIN_DEPTH,
+ self._background_replace_room_depth_min_depth,
+ )
+
async def _background_insert_retention(self, progress, batch_size):
"""Retrieves a list of all rooms within a range and inserts an entry for each of
them into the room_retention table.
@@ -1164,7 +1187,9 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
new_last_room_id = room_id
self.db_pool.updates._background_update_progress_txn(
- txn, self.ADD_ROOMS_ROOM_VERSION_COLUMN, {"room_id": new_last_room_id}
+ txn,
+ _BackgroundUpdates.ADD_ROOMS_ROOM_VERSION_COLUMN,
+ {"room_id": new_last_room_id},
)
return False
@@ -1176,7 +1201,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
if end:
await self.db_pool.updates._end_background_update(
- self.ADD_ROOMS_ROOM_VERSION_COLUMN
+ _BackgroundUpdates.ADD_ROOMS_ROOM_VERSION_COLUMN
)
return batch_size
@@ -1215,7 +1240,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
if not rooms:
await self.db_pool.updates._end_background_update(
- self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE
+ _BackgroundUpdates.REMOVE_TOMESTONED_ROOMS_BG_UPDATE
)
return 0
@@ -1224,7 +1249,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
await self.set_room_is_public(room_id, False)
await self.db_pool.updates._background_update_progress(
- self.REMOVE_TOMESTONED_ROOMS_BG_UPDATE, {"room_id": rooms[-1]}
+ _BackgroundUpdates.REMOVE_TOMESTONED_ROOMS_BG_UPDATE, {"room_id": rooms[-1]}
)
return len(rooms)
@@ -1268,6 +1293,71 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
return max_ordering is None
+ async def _background_populate_room_depth_min_depth2(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """Populate room_depth.min_depth2
+
+ This is to deal with the fact that min_depth was initially created as a
+ 32-bit integer field.
+ """
+
+ def process(txn: Cursor) -> int:
+ last_room = progress.get("last_room", "")
+ txn.execute(
+ """
+ UPDATE room_depth SET min_depth2=min_depth
+ WHERE room_id IN (
+ SELECT room_id FROM room_depth WHERE room_id > ?
+ ORDER BY room_id LIMIT ?
+ )
+ RETURNING room_id;
+ """,
+ (last_room, batch_size),
+ )
+ row_count = txn.rowcount
+ if row_count == 0:
+ return 0
+ last_room = max(row[0] for row in txn)
+ logger.info("populated room_depth up to %s", last_room)
+
+ self.db_pool.updates._background_update_progress_txn(
+ txn,
+ _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2,
+ {"last_room": last_room},
+ )
+ return row_count
+
+ result = await self.db_pool.runInteraction(
+ "_background_populate_min_depth2", process
+ )
+
+ if result != 0:
+ return result
+
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.POPULATE_ROOM_DEPTH_MIN_DEPTH2
+ )
+ return 0
+
+ async def _background_replace_room_depth_min_depth(
+ self, progress: JsonDict, batch_size: int
+ ) -> int:
+ """Drop the old 'min_depth' column and rename 'min_depth2' into its place."""
+
+ def process(txn: Cursor) -> None:
+ for sql in _REPLACE_ROOM_DEPTH_SQL_COMMANDS:
+ logger.info("completing room_depth migration: %s", sql)
+ txn.execute(sql)
+
+ await self.db_pool.runInteraction("_background_replace_room_depth", process)
+
+ await self.db_pool.updates._end_background_update(
+ _BackgroundUpdates.REPLACE_ROOM_DEPTH_MIN_DEPTH,
+ )
+
+ return 0
+
class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
def __init__(self, database: DatabasePool, db_conn, hs):
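The two new background updates follow a common two-phase pattern: backfill the new column in keyed batches (while the trigger added in the schema delta keeps fresh writes consistent), then drop and rename. A standalone sketch of the batch-and-progress loop, with sqlite3 standing in for Synapse's `DatabasePool`/`updates` machinery and illustrative table contents:

```python
import sqlite3
from typing import Optional

def backfill_batch(
    conn: sqlite3.Connection, last_room: str, batch_size: int
) -> Optional[str]:
    """Copy min_depth into min_depth2 for one batch of rooms, keyed on room_id.

    Returns the new progress marker, or None once every row has been handled
    (at which point Synapse would call _end_background_update).
    """
    rows = conn.execute(
        "SELECT room_id FROM room_depth WHERE room_id > ? ORDER BY room_id LIMIT ?",
        (last_room, batch_size),
    ).fetchall()
    if not rows:
        return None
    room_ids = [r[0] for r in rows]
    conn.executemany(
        "UPDATE room_depth SET min_depth2 = min_depth WHERE room_id = ?",
        [(rid,) for rid in room_ids],
    )
    conn.commit()
    return room_ids[-1]  # stored as {"last_room": ...} progress in Synapse

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE room_depth (room_id TEXT, min_depth INT, min_depth2 BIGINT)")
conn.executemany(
    "INSERT INTO room_depth VALUES (?, ?, NULL)",
    [(f"!room{i}:example.com", i) for i in range(10)],
)
progress = ""
while True:
    progress = backfill_batch(conn, progress, batch_size=3)
    if progress is None:
        break
assert conn.execute(
    "SELECT count(*) FROM room_depth WHERE min_depth2 IS NULL"
).fetchone()[0] == 0
```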
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index 82a1833509..59d67c255b 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -26,7 +26,6 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import StoreError
from synapse.storage.database import DatabasePool
from synapse.storage.databases.main.state_deltas import StateDeltasStore
-from synapse.storage.engines import PostgresEngine
from synapse.types import JsonDict
from synapse.util.caches.descriptors import cached
@@ -49,14 +48,6 @@ ABSOLUTE_STATS_FIELDS = {
"user": ("joined_rooms",),
}
-# these fields are per-timeslice and so should be reset to 0 upon a new slice
-# You can draw these stats on a histogram.
-# Example: number of events sent locally during a time slice
-PER_SLICE_FIELDS = {
- "room": ("total_events", "total_event_bytes"),
- "user": ("invites_sent", "rooms_created", "total_events", "total_event_bytes"),
-}
-
TYPE_TO_TABLE = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")}
# these are the tables (& ID columns) which contain our actual subjects
@@ -106,7 +97,6 @@ class StatsStore(StateDeltasStore):
self.server_name = hs.hostname
self.clock = self.hs.get_clock()
self.stats_enabled = hs.config.stats_enabled
- self.stats_bucket_size = hs.config.stats_bucket_size
self.stats_delta_processing_lock = DeferredLock()
@@ -122,22 +112,6 @@ class StatsStore(StateDeltasStore):
self.db_pool.updates.register_noop_background_update("populate_stats_cleanup")
self.db_pool.updates.register_noop_background_update("populate_stats_prepare")
- def quantise_stats_time(self, ts):
- """
- Quantises a timestamp to be a multiple of the bucket size.
-
- Args:
- ts (int): the timestamp to quantise, in milliseconds since the Unix
- Epoch
-
- Returns:
- int: a timestamp which
- - is divisible by the bucket size;
- - is no later than `ts`; and
- - is the largest such timestamp.
- """
- return (ts // self.stats_bucket_size) * self.stats_bucket_size
-
async def _populate_stats_process_users(self, progress, batch_size):
"""
This is a background update which regenerates statistics for users.
@@ -288,56 +262,6 @@ class StatsStore(StateDeltasStore):
desc="update_room_state",
)
- async def get_statistics_for_subject(
- self, stats_type: str, stats_id: str, start: str, size: int = 100
- ) -> List[dict]:
- """
- Get statistics for a given subject.
-
- Args:
- stats_type: The type of subject
- stats_id: The ID of the subject (e.g. room_id or user_id)
- start: Pagination start. Number of entries, not timestamp.
- size: How many entries to return.
-
- Returns:
- A list of dicts, where the dict has the keys of
- ABSOLUTE_STATS_FIELDS[stats_type], and "bucket_size" and "end_ts".
- """
- return await self.db_pool.runInteraction(
- "get_statistics_for_subject",
- self._get_statistics_for_subject_txn,
- stats_type,
- stats_id,
- start,
- size,
- )
-
- def _get_statistics_for_subject_txn(
- self, txn, stats_type, stats_id, start, size=100
- ):
- """
- Transaction-bound version of L{get_statistics_for_subject}.
- """
-
- table, id_col = TYPE_TO_TABLE[stats_type]
- selected_columns = list(
- ABSOLUTE_STATS_FIELDS[stats_type] + PER_SLICE_FIELDS[stats_type]
- )
-
- slice_list = self.db_pool.simple_select_list_paginate_txn(
- txn,
- table + "_historical",
- "end_ts",
- start,
- size,
- retcols=selected_columns + ["bucket_size", "end_ts"],
- keyvalues={id_col: stats_id},
- order_direction="DESC",
- )
-
- return slice_list
-
@cached()
async def get_earliest_token_for_stats(
self, stats_type: str, id: str
@@ -451,14 +375,10 @@ class StatsStore(StateDeltasStore):
table, id_col = TYPE_TO_TABLE[stats_type]
- quantised_ts = self.quantise_stats_time(int(ts))
- end_ts = quantised_ts + self.stats_bucket_size
-
# Lets be paranoid and check that all the given field names are known
abs_field_names = ABSOLUTE_STATS_FIELDS[stats_type]
- slice_field_names = PER_SLICE_FIELDS[stats_type]
for field in chain(fields.keys(), absolute_field_overrides.keys()):
- if field not in abs_field_names and field not in slice_field_names:
+ if field not in abs_field_names:
# guard against potential SQL injection dodginess
raise ValueError(
"%s is not a recognised field"
@@ -491,20 +411,6 @@ class StatsStore(StateDeltasStore):
additive_relatives=deltas_of_absolute_fields,
)
- per_slice_additive_relatives = {
- key: fields.get(key, 0) for key in slice_field_names
- }
- self._upsert_copy_from_table_with_additive_relatives_txn(
- txn=txn,
- into_table=table + "_historical",
- keyvalues={id_col: stats_id},
- extra_dst_insvalues={"bucket_size": self.stats_bucket_size},
- extra_dst_keyvalues={"end_ts": end_ts},
- additive_relatives=per_slice_additive_relatives,
- src_table=table + "_current",
- copy_columns=abs_field_names,
- )
-
def _upsert_with_additive_relatives_txn(
self, txn, table, keyvalues, absolutes, additive_relatives
):
@@ -528,7 +434,7 @@ class StatsStore(StateDeltasStore):
]
relative_updates = [
- "%(field)s = EXCLUDED.%(field)s + %(table)s.%(field)s"
+ "%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
% {"table": table, "field": field}
for field in additive_relatives.keys()
]
@@ -568,205 +474,13 @@ class StatsStore(StateDeltasStore):
self.db_pool.simple_insert_txn(txn, table, merged_dict)
else:
for (key, val) in additive_relatives.items():
- current_row[key] += val
+ if current_row[key] is None:
+ current_row[key] = val
+ else:
+ current_row[key] += val
current_row.update(absolutes)
self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row)
- def _upsert_copy_from_table_with_additive_relatives_txn(
- self,
- txn,
- into_table,
- keyvalues,
- extra_dst_keyvalues,
- extra_dst_insvalues,
- additive_relatives,
- src_table,
- copy_columns,
- ):
- """Updates the historic stats table with latest updates.
-
- This involves copying "absolute" fields from the `_current` table, and
- adding relative fields to any existing values.
-
- Args:
- txn: Transaction
- into_table (str): The destination table to UPSERT the row into
- keyvalues (dict[str, any]): Row-identifying key values
- extra_dst_keyvalues (dict[str, any]): Additional keyvalues
- for `into_table`.
- extra_dst_insvalues (dict[str, any]): Additional values to insert
- on new row creation for `into_table`.
- additive_relatives (dict[str, any]): Fields that will be added onto
- if existing row present. (Must be disjoint from copy_columns.)
- src_table (str): The source table to copy from
- copy_columns (iterable[str]): The list of columns to copy
- """
- if self.database_engine.can_native_upsert:
- ins_columns = chain(
- keyvalues,
- copy_columns,
- additive_relatives,
- extra_dst_keyvalues,
- extra_dst_insvalues,
- )
- sel_exprs = chain(
- keyvalues,
- copy_columns,
- (
- "?"
- for _ in chain(
- additive_relatives, extra_dst_keyvalues, extra_dst_insvalues
- )
- ),
- )
- keyvalues_where = ("%s = ?" % f for f in keyvalues)
-
- sets_cc = ("%s = EXCLUDED.%s" % (f, f) for f in copy_columns)
- sets_ar = (
- "%s = EXCLUDED.%s + %s.%s" % (f, f, into_table, f)
- for f in additive_relatives
- )
-
- sql = """
- INSERT INTO %(into_table)s (%(ins_columns)s)
- SELECT %(sel_exprs)s
- FROM %(src_table)s
- WHERE %(keyvalues_where)s
- ON CONFLICT (%(keyvalues)s)
- DO UPDATE SET %(sets)s
- """ % {
- "into_table": into_table,
- "ins_columns": ", ".join(ins_columns),
- "sel_exprs": ", ".join(sel_exprs),
- "keyvalues_where": " AND ".join(keyvalues_where),
- "src_table": src_table,
- "keyvalues": ", ".join(
- chain(keyvalues.keys(), extra_dst_keyvalues.keys())
- ),
- "sets": ", ".join(chain(sets_cc, sets_ar)),
- }
-
- qargs = list(
- chain(
- additive_relatives.values(),
- extra_dst_keyvalues.values(),
- extra_dst_insvalues.values(),
- keyvalues.values(),
- )
- )
- txn.execute(sql, qargs)
- else:
- self.database_engine.lock_table(txn, into_table)
- src_row = self.db_pool.simple_select_one_txn(
- txn, src_table, keyvalues, copy_columns
- )
- all_dest_keyvalues = {**keyvalues, **extra_dst_keyvalues}
- dest_current_row = self.db_pool.simple_select_one_txn(
- txn,
- into_table,
- keyvalues=all_dest_keyvalues,
- retcols=list(chain(additive_relatives.keys(), copy_columns)),
- allow_none=True,
- )
-
- if dest_current_row is None:
- merged_dict = {
- **keyvalues,
- **extra_dst_keyvalues,
- **extra_dst_insvalues,
- **src_row,
- **additive_relatives,
- }
- self.db_pool.simple_insert_txn(txn, into_table, merged_dict)
- else:
- for (key, val) in additive_relatives.items():
- src_row[key] = dest_current_row[key] + val
- self.db_pool.simple_update_txn(
- txn, into_table, all_dest_keyvalues, src_row
- )
-
- async def get_changes_room_total_events_and_bytes(
- self, min_pos: int, max_pos: int
- ) -> Tuple[Dict[str, Dict[str, int]], Dict[str, Dict[str, int]]]:
- """Fetches the counts of events in the given range of stream IDs.
-
- Args:
- min_pos
- max_pos
-
- Returns:
- Mapping of room ID to field changes.
- """
-
- return await self.db_pool.runInteraction(
- "stats_incremental_total_events_and_bytes",
- self.get_changes_room_total_events_and_bytes_txn,
- min_pos,
- max_pos,
- )
-
- def get_changes_room_total_events_and_bytes_txn(
- self, txn, low_pos: int, high_pos: int
- ) -> Tuple[Dict[str, Dict[str, int]], Dict[str, Dict[str, int]]]:
- """Gets the total_events and total_event_bytes counts for rooms and
- senders, in a range of stream_orderings (including backfilled events).
-
- Args:
- txn
- low_pos: Low stream ordering
- high_pos: High stream ordering
-
- Returns:
- The room and user deltas for total_events/total_event_bytes in the
- format of `stats_id` -> fields
- """
-
- if low_pos >= high_pos:
- # nothing to do here.
- return {}, {}
-
- if isinstance(self.database_engine, PostgresEngine):
- new_bytes_expression = "OCTET_LENGTH(json)"
- else:
- new_bytes_expression = "LENGTH(CAST(json AS BLOB))"
-
- sql = """
- SELECT events.room_id, COUNT(*) AS new_events, SUM(%s) AS new_bytes
- FROM events INNER JOIN event_json USING (event_id)
- WHERE (? < stream_ordering AND stream_ordering <= ?)
- OR (? <= stream_ordering AND stream_ordering <= ?)
- GROUP BY events.room_id
- """ % (
- new_bytes_expression,
- )
-
- txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos))
-
- room_deltas = {
- room_id: {"total_events": new_events, "total_event_bytes": new_bytes}
- for room_id, new_events, new_bytes in txn
- }
-
- sql = """
- SELECT events.sender, COUNT(*) AS new_events, SUM(%s) AS new_bytes
- FROM events INNER JOIN event_json USING (event_id)
- WHERE (? < stream_ordering AND stream_ordering <= ?)
- OR (? <= stream_ordering AND stream_ordering <= ?)
- GROUP BY events.sender
- """ % (
- new_bytes_expression,
- )
-
- txn.execute(sql, (low_pos, high_pos, -high_pos, -low_pos))
-
- user_deltas = {
- user_id: {"total_events": new_events, "total_event_bytes": new_bytes}
- for user_id, new_events, new_bytes in txn
- if self.hs.is_mine_id(user_id)
- }
-
- return room_deltas, user_deltas
-
async def _calculate_and_set_initial_state_for_room(
self, room_id: str
) -> Tuple[dict, dict, int]:
@@ -893,6 +607,7 @@ class StatsStore(StateDeltasStore):
"invited_members": membership_counts.get(Membership.INVITE, 0),
"left_members": membership_counts.get(Membership.LEAVE, 0),
"banned_members": membership_counts.get(Membership.BAN, 0),
+ "knocked_members": membership_counts.get(Membership.KNOCK, 0),
"local_users_in_room": len(local_users_in_room),
},
)
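Two of the changes above interact: `knocked_members` is newly tracked, so pre-existing rows hold NULL for it, and the added `COALESCE(..., 0)` is what stops `NULL + delta` from staying NULL. A minimal sketch of that NULL-safe additive upsert, with sqlite3 standing in for `DatabasePool`; as in Synapse, the field name must come from a known whitelist, never from user input:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE room_stats_current ("
    "room_id TEXT PRIMARY KEY, joined_members INT, knocked_members INT)"
)
# A row from before knocked_members existed: the counter is NULL.
conn.execute("INSERT INTO room_stats_current VALUES ('!r:example.com', 5, NULL)")

def add_delta(room_id: str, field: str, delta: int) -> None:
    """Apply an additive delta, treating a NULL counter as zero."""
    # `field` is interpolated, so it must come from a trusted whitelist
    # (mirroring the ABSOLUTE_STATS_FIELDS guard above).
    conn.execute(
        f"""
        INSERT INTO room_stats_current (room_id, {field}) VALUES (?, ?)
        ON CONFLICT (room_id)
        DO UPDATE SET {field} = excluded.{field} + COALESCE({field}, 0)
        """,
        (room_id, delta),
    )

add_delta("!r:example.com", "knocked_members", 1)
row = conn.execute(
    "SELECT knocked_members FROM room_stats_current WHERE room_id = '!r:example.com'"
).fetchone()
assert row[0] == 1  # without COALESCE this would stay NULL (None)
```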
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 0a53b73ccc..36340a652a 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-SCHEMA_VERSION = 60
+SCHEMA_VERSION = 61
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@@ -21,6 +21,10 @@ older versions of Synapse).
See `README.md <synapse/storage/schema/README.md>`_ for more information on how this
works.
+
+Changes in SCHEMA_VERSION = 61:
+ - The `user_stats_historical` and `room_stats_historical` tables are not written and
+ are not read (previously, they were written but not read).
"""
diff --git a/synapse/storage/schema/main/delta/61/01change_appservices_txns.sql.postgres b/synapse/storage/schema/main/delta/61/01change_appservices_txns.sql.postgres
new file mode 100644
index 0000000000..c8aec78e60
--- /dev/null
+++ b/synapse/storage/schema/main/delta/61/01change_appservices_txns.sql.postgres
@@ -0,0 +1,23 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- we use bigint elsewhere in the database for appservice txn ids (notably,
+-- application_services_state.last_txn), and generally we use bigints everywhere else
+-- we have monotonic counters, so let's bring this one in line.
+--
+-- assuming there aren't thousands of rows for decommissioned/non-functional ASes, this
+-- table should be pretty small, so safe to do a synchronous ALTER TABLE.
+
+ALTER TABLE application_services_txns ALTER COLUMN txn_id SET DATA TYPE BIGINT;
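If you want to confirm the delta applied, something along these lines works; a sketch assuming a `psycopg2`-style connection to the Synapse Postgres database (the connection setup is not part of the patch):

```python
# Hypothetical verification helper (not part of the patch): read the column
# type back from information_schema after the delta has run.
def txn_id_column_type(conn) -> str:
    with conn.cursor() as cur:
        cur.execute(
            """
            SELECT data_type FROM information_schema.columns
            WHERE table_name = 'application_services_txns'
              AND column_name = 'txn_id'
            """
        )
        return cur.fetchone()[0]  # expect 'bigint' once the ALTER has run
```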
diff --git a/synapse/storage/schema/main/delta/61/02drop_redundant_room_depth_index.sql b/synapse/storage/schema/main/delta/61/02drop_redundant_room_depth_index.sql
new file mode 100644
index 0000000000..35ca7a40c0
--- /dev/null
+++ b/synapse/storage/schema/main/delta/61/02drop_redundant_room_depth_index.sql
@@ -0,0 +1,18 @@
+/* Copyright 2021 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- this index is redundant; there is another UNIQUE index on this table.
+DROP INDEX IF EXISTS room_depth_room;
+
diff --git a/synapse/storage/schema/main/delta/61/03recreate_min_depth.py b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
new file mode 100644
index 0000000000..f8d7db9f2e
--- /dev/null
+++ b/synapse/storage/schema/main/delta/61/03recreate_min_depth.py
@@ -0,0 +1,70 @@
+# Copyright 2021 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This migration handles the process of changing the type of `room_depth.min_depth` to
+a BIGINT.
+"""
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
+from synapse.storage.types import Cursor
+
+
+def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
+ if not isinstance(database_engine, PostgresEngine):
+ # this only applies to postgres - sqlite does not distinguish between big and
+ # little ints.
+ return
+
+ # First add a new column to contain the bigger min_depth
+ cur.execute("ALTER TABLE room_depth ADD COLUMN min_depth2 BIGINT")
+
+ # Create a trigger which will keep it populated.
+ cur.execute(
+ """
+ CREATE OR REPLACE FUNCTION populate_min_depth2() RETURNS trigger AS $BODY$
+ BEGIN
+ new.min_depth2 := new.min_depth;
+ RETURN NEW;
+ END;
+ $BODY$ LANGUAGE plpgsql
+ """
+ )
+
+ cur.execute(
+ """
+ CREATE TRIGGER populate_min_depth2_trigger BEFORE INSERT OR UPDATE ON room_depth
+ FOR EACH ROW
+ EXECUTE PROCEDURE populate_min_depth2()
+ """
+ )
+
+ # Start a bg process to populate it for old rooms
+ cur.execute(
+ """
+ INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+ (6103, 'populate_room_depth_min_depth2', '{}')
+ """
+ )
+
+ # and another to switch them over once it completes.
+ cur.execute(
+ """
+ INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+            (6103, 'replace_room_depth_min_depth', '{}', 'populate_room_depth_min_depth2')
+ """
+ )
+
+
+def run_upgrade(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs):
+ pass
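The two background updates inserted above are implemented in the stores rather than in this delta file. As a rough sketch of the pattern (assumed batch size and shape, not the real `populate_room_depth_min_depth2` handler), the populate step copies values across in chunks until no NULLs remain, after which the replace step can swap the columns over:

```python
# Assumed sketch, not the real background update: batched backfill of
# min_depth2 for rows that existed before the trigger was created.
def backfill_min_depth2(cur, batch_size: int = 1000) -> None:
    while True:
        cur.execute(
            """
            UPDATE room_depth SET min_depth2 = min_depth
            WHERE room_id IN (
                SELECT room_id FROM room_depth
                WHERE min_depth2 IS NULL
                LIMIT %s
            )
            """,
            (batch_size,),
        )
        if cur.rowcount == 0:
            break  # nothing left to copy; the swap update can now run
```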
diff --git a/sytest-blacklist b/sytest-blacklist
index 566ef96711..de9986357b 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -41,8 +41,3 @@ We can't peek into rooms with invited history_visibility
We can't peek into rooms with joined history_visibility
Local users can peek by room alias
Peeked rooms only turn up in the sync for the device who peeked them
-
-
-# Blacklisted due to changes made in #10272
-Outbound federation will ignore a missing event with bad JSON for room version 6
-Federation rejects inbound events where the prev_events cannot be found
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index ebe2c05165..903c69127d 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -43,7 +43,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
def test_generates_and_loads_macaroon_secret_key(self):
self.generate_config()
- with open(self.file, "r") as f:
+ with open(self.file) as f:
raw = yaml.safe_load(f)
self.assertIn("macaroon_secret_key", raw)
@@ -120,7 +120,7 @@ class ConfigLoadingTestCase(unittest.TestCase):
def generate_config_and_remove_lines_containing(self, needle):
self.generate_config()
- with open(self.file, "r") as f:
+ with open(self.file) as f:
contents = f.readlines()
contents = [line for line in contents if needle not in line]
with open(self.file, "w") as f:
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index 875b0d0a11..c4ad33194d 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -152,7 +152,7 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
)
self.assertEqual(len(presence_updates), 1)
- presence_update = presence_updates[0] # type: UserPresenceState
+ presence_update: UserPresenceState = presence_updates[0]
self.assertEqual(presence_update.user_id, self.other_user_one_id)
self.assertEqual(presence_update.state, "online")
self.assertEqual(presence_update.status_msg, "boop")
@@ -274,7 +274,7 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
presence_updates, _ = sync_presence(self, self.other_user_id)
self.assertEqual(len(presence_updates), 1)
- presence_update = presence_updates[0] # type: UserPresenceState
+ presence_update: UserPresenceState = presence_updates[0]
self.assertEqual(presence_update.user_id, self.other_user_id)
self.assertEqual(presence_update.state, "online")
self.assertEqual(presence_update.status_msg, "I'm online!")
@@ -320,7 +320,7 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase):
)
for call in calls:
call_args = call[0]
- federation_transaction = call_args[0] # type: Transaction
+ federation_transaction: Transaction = call_args[0]
# Get the sent EDUs in this transaction
edus = federation_transaction.get_dict()["edus"]
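The annotation changes in this and the following test files are mechanical: comment-style `# type:` annotations become PEP 526 variable annotations. For reference:

```python
from typing import List

# Before: comment-style annotation (a Python 2-era convention)
rows = []  # type: List[str]

# After: inline variable annotation, as in the rewrites above
rows: List[str] = []
```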
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index cdb41101b3..2928c4f48c 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -103,7 +103,7 @@ class ProfileTestCase(unittest.HomeserverTestCase):
)
self.assertIsNone(
- (self.get_success(self.store.get_profile_displayname(self.frank.localpart)))
+ self.get_success(self.store.get_profile_displayname(self.frank.localpart))
)
def test_set_my_name_if_disabled(self):
diff --git a/tests/handlers/test_space_summary.py b/tests/handlers/test_space_summary.py
index 9771d3fb3b..faed1f1a18 100644
--- a/tests/handlers/test_space_summary.py
+++ b/tests/handlers/test_space_summary.py
@@ -14,7 +14,7 @@
from typing import Any, Iterable, Optional, Tuple
from unittest import mock
-from synapse.api.constants import EventContentFields, RoomTypes
+from synapse.api.constants import EventContentFields, JoinRules, RoomTypes
from synapse.api.errors import AuthError
from synapse.handlers.space_summary import _child_events_comparison_key
from synapse.rest import admin
@@ -178,3 +178,205 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
result = self.get_success(self.handler.get_space_summary(user2, self.space))
self._assert_rooms(result, [self.space])
self._assert_events(result, [(self.space, self.room)])
+
+ def test_complex_space(self):
+ """
+ Create a "complex" space to see how it handles things like loops and subspaces.
+ """
+ # Create an inaccessible room.
+ user2 = self.register_user("user2", "pass")
+ token2 = self.login("user2", "pass")
+ room2 = self.helper.create_room_as(user2, tok=token2)
+ # This is a bit odd as "user" is adding a room they don't know about, but
+ # it works for the tests.
+ self._add_child(self.space, room2, self.token)
+
+ # Create a subspace under the space with an additional room in it.
+ subspace = self.helper.create_room_as(
+ self.user,
+ tok=self.token,
+ extra_content={
+ "creation_content": {EventContentFields.ROOM_TYPE: RoomTypes.SPACE}
+ },
+ )
+ subroom = self.helper.create_room_as(self.user, tok=self.token)
+ self._add_child(self.space, subspace, token=self.token)
+ self._add_child(subspace, subroom, token=self.token)
+ # Also add the two rooms from the space into this subspace (causing loops).
+ self._add_child(subspace, self.room, token=self.token)
+ self._add_child(subspace, room2, self.token)
+
+ result = self.get_success(self.handler.get_space_summary(self.user, self.space))
+
+ # The result should include each room a single time and each link.
+ self._assert_rooms(result, [self.space, self.room, subspace, subroom])
+ self._assert_events(
+ result,
+ [
+ (self.space, self.room),
+ (self.space, room2),
+ (self.space, subspace),
+ (subspace, subroom),
+ (subspace, self.room),
+ (subspace, room2),
+ ],
+ )
+
+ def test_fed_complex(self):
+ """
+ Return data over federation and ensure that it is handled properly.
+ """
+ fed_hostname = self.hs.hostname + "2"
+ subspace = "#subspace:" + fed_hostname
+ subroom = "#subroom:" + fed_hostname
+
+ async def summarize_remote_room(
+ _self, room, suggested_only, max_children, exclude_rooms
+ ):
+ # Return some good data, and some bad data:
+ #
+ # * Event *back* to the root room.
+ # * Unrelated events / rooms
+ # * Multiple levels of events (in a not-useful order, e.g. grandchild
+ # events before child events).
+
+ # Note that these entries are brief, but should contain enough info.
+ rooms = [
+ {
+ "room_id": subspace,
+ "world_readable": True,
+ "room_type": RoomTypes.SPACE,
+ },
+ {
+ "room_id": subroom,
+ "world_readable": True,
+ },
+ ]
+ event_content = {"via": [fed_hostname]}
+ events = [
+ {
+ "room_id": subspace,
+ "state_key": subroom,
+ "content": event_content,
+ },
+ ]
+ return rooms, events
+
+ # Add a room to the space which is on another server.
+ self._add_child(self.space, subspace, self.token)
+
+ with mock.patch(
+ "synapse.handlers.space_summary.SpaceSummaryHandler._summarize_remote_room",
+ new=summarize_remote_room,
+ ):
+ result = self.get_success(
+ self.handler.get_space_summary(self.user, self.space)
+ )
+
+ self._assert_rooms(result, [self.space, self.room, subspace, subroom])
+ self._assert_events(
+ result,
+ [
+ (self.space, self.room),
+ (self.space, subspace),
+ (subspace, subroom),
+ ],
+ )
+
+ def test_fed_filtering(self):
+ """
+ Rooms returned over federation should be properly filtered to only include
+ rooms the user has access to.
+ """
+ fed_hostname = self.hs.hostname + "2"
+ subspace = "#subspace:" + fed_hostname
+
+ # Create a few rooms which will have different properties.
+ restricted_room = "#restricted:" + fed_hostname
+ restricted_accessible_room = "#restricted_accessible:" + fed_hostname
+ world_readable_room = "#world_readable:" + fed_hostname
+ joined_room = self.helper.create_room_as(self.user, tok=self.token)
+
+ async def summarize_remote_room(
+ _self, room, suggested_only, max_children, exclude_rooms
+ ):
+ # Note that these entries are brief, but should contain enough info.
+ rooms = [
+ {
+ "room_id": restricted_room,
+ "world_readable": False,
+ "join_rules": JoinRules.MSC3083_RESTRICTED,
+ "allowed_spaces": [],
+ },
+ {
+ "room_id": restricted_accessible_room,
+ "world_readable": False,
+ "join_rules": JoinRules.MSC3083_RESTRICTED,
+ "allowed_spaces": [self.room],
+ },
+ {
+ "room_id": world_readable_room,
+ "world_readable": True,
+ "join_rules": JoinRules.INVITE,
+ },
+ {
+ "room_id": joined_room,
+ "world_readable": False,
+ "join_rules": JoinRules.INVITE,
+ },
+ ]
+
+ # Place each room in the sub-space.
+ event_content = {"via": [fed_hostname]}
+ events = [
+ {
+ "room_id": subspace,
+ "state_key": room["room_id"],
+ "content": event_content,
+ }
+ for room in rooms
+ ]
+
+ # Also include the subspace.
+ rooms.insert(
+ 0,
+ {
+ "room_id": subspace,
+ "world_readable": True,
+ },
+ )
+ return rooms, events
+
+ # Add a room to the space which is on another server.
+ self._add_child(self.space, subspace, self.token)
+
+ with mock.patch(
+ "synapse.handlers.space_summary.SpaceSummaryHandler._summarize_remote_room",
+ new=summarize_remote_room,
+ ):
+ result = self.get_success(
+ self.handler.get_space_summary(self.user, self.space)
+ )
+
+ self._assert_rooms(
+ result,
+ [
+ self.space,
+ self.room,
+ subspace,
+ restricted_accessible_room,
+ world_readable_room,
+ joined_room,
+ ],
+ )
+ self._assert_events(
+ result,
+ [
+ (self.space, self.room),
+ (self.space, subspace),
+ (subspace, restricted_room),
+ (subspace, restricted_accessible_room),
+ (subspace, world_readable_room),
+ (subspace, joined_room),
+ ],
+ )
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index c9d4fd9336..e4059acda3 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -88,16 +88,12 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def _get_current_stats(self, stats_type, stat_id):
table, id_col = stats.TYPE_TO_TABLE[stats_type]
- cols = list(stats.ABSOLUTE_STATS_FIELDS[stats_type]) + list(
- stats.PER_SLICE_FIELDS[stats_type]
- )
-
- end_ts = self.store.quantise_stats_time(self.reactor.seconds() * 1000)
+ cols = list(stats.ABSOLUTE_STATS_FIELDS[stats_type])
return self.get_success(
self.store.db_pool.simple_select_one(
- table + "_historical",
- {id_col: stat_id, end_ts: end_ts},
+ table + "_current",
+ {id_col: stat_id},
cols,
allow_none=True,
)
@@ -156,115 +152,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertEqual(len(r), 1)
self.assertEqual(r[0]["topic"], "foo")
- def test_initial_earliest_token(self):
- """
- Ingestion via notify_new_event will ignore tokens that the background
- update have already processed.
- """
-
- self.reactor.advance(86401)
-
- self.hs.config.stats_enabled = False
- self.handler.stats_enabled = False
-
- u1 = self.register_user("u1", "pass")
- u1_token = self.login("u1", "pass")
-
- u2 = self.register_user("u2", "pass")
- u2_token = self.login("u2", "pass")
-
- u3 = self.register_user("u3", "pass")
- u3_token = self.login("u3", "pass")
-
- room_1 = self.helper.create_room_as(u1, tok=u1_token)
- self.helper.send_state(
- room_1, event_type="m.room.topic", body={"topic": "foo"}, tok=u1_token
- )
-
- # Begin the ingestion by creating the temp tables. This will also store
- # the position that the deltas should begin at, once they take over.
- self.hs.config.stats_enabled = True
- self.handler.stats_enabled = True
- self.store.db_pool.updates._all_done = False
- self.get_success(
- self.store.db_pool.simple_update_one(
- table="stats_incremental_position",
- keyvalues={},
- updatevalues={"stream_id": 0},
- )
- )
-
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {"update_name": "populate_stats_prepare", "progress_json": "{}"},
- )
- )
-
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
-
- # Now, before the table is actually ingested, add some more events.
- self.helper.invite(room=room_1, src=u1, targ=u2, tok=u1_token)
- self.helper.join(room=room_1, user=u2, tok=u2_token)
-
- # orig_delta_processor = self.store.
-
- # Now do the initial ingestion.
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {"update_name": "populate_stats_process_rooms", "progress_json": "{}"},
- )
- )
- self.get_success(
- self.store.db_pool.simple_insert(
- "background_updates",
- {
- "update_name": "populate_stats_cleanup",
- "progress_json": "{}",
- "depends_on": "populate_stats_process_rooms",
- },
- )
- )
-
- self.store.db_pool.updates._all_done = False
- while not self.get_success(
- self.store.db_pool.updates.has_completed_background_updates()
- ):
- self.get_success(
- self.store.db_pool.updates.do_next_background_update(100), by=0.1
- )
-
- self.reactor.advance(86401)
-
- # Now add some more events, triggering ingestion. Because of the stream
- # position being set to before the events sent in the middle, a simpler
- # implementation would reprocess those events, and say there were four
- # users, not three.
- self.helper.invite(room=room_1, src=u1, targ=u3, tok=u1_token)
- self.helper.join(room=room_1, user=u3, tok=u3_token)
-
- # self.handler.notify_new_event()
-
-        # We need to let the delta processor advance…
- self.reactor.advance(10 * 60)
-
- # Get the slices! There should be two -- day 1, and day 2.
- r = self.get_success(self.store.get_statistics_for_subject("room", room_1, 0))
-
- self.assertEqual(len(r), 2)
-
- # The oldest has 2 joined members
- self.assertEqual(r[-1]["joined_members"], 2)
-
- # The newest has 3
- self.assertEqual(r[0]["joined_members"], 3)
-
def test_create_user(self):
"""
When we create a user, it should have statistics already ready.
@@ -296,22 +183,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertIsNotNone(r1stats)
self.assertIsNotNone(r2stats)
- # contains the default things you'd expect in a fresh room
- self.assertEqual(
- r1stats["total_events"],
- EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM,
- "Wrong number of total_events in new room's stats!"
- " You may need to update this if more state events are added to"
- " the room creation process.",
- )
- self.assertEqual(
- r2stats["total_events"],
- EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM,
- "Wrong number of total_events in new room's stats!"
- " You may need to update this if more state events are added to"
- " the room creation process.",
- )
-
self.assertEqual(
r1stats["current_state_events"], EXPT_NUM_STATE_EVTS_IN_FRESH_PUBLIC_ROOM
)
@@ -327,24 +198,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
self.assertEqual(r2stats["invited_members"], 0)
self.assertEqual(r2stats["banned_members"], 0)
- def test_send_message_increments_total_events(self):
- """
- When we send a message, it increments total_events.
- """
-
- self._perform_background_initial_update()
-
- u1 = self.register_user("u1", "pass")
- u1token = self.login("u1", "pass")
- r1 = self.helper.create_room_as(u1, tok=u1token)
- r1stats_ante = self._get_current_stats("room", r1)
-
- self.helper.send(r1, "hiss", tok=u1token)
-
- r1stats_post = self._get_current_stats("room", r1)
-
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
-
def test_updating_profile_information_does_not_increase_joined_members_count(self):
"""
Check that the joined_members count does not increase when a user changes their
@@ -378,7 +231,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def test_send_state_event_nonoverwriting(self):
"""
- When we send a non-overwriting state event, it increments total_events AND current_state_events
+ When we send a non-overwriting state event, it increments current_state_events
"""
self._perform_background_initial_update()
@@ -399,44 +252,14 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
1,
)
- def test_send_state_event_overwriting(self):
- """
- When we send an overwriting state event, it increments total_events ONLY
- """
-
- self._perform_background_initial_update()
-
- u1 = self.register_user("u1", "pass")
- u1token = self.login("u1", "pass")
- r1 = self.helper.create_room_as(u1, tok=u1token)
-
- self.helper.send_state(
- r1, "cat.hissing", {"value": True}, tok=u1token, state_key="tabby"
- )
-
- r1stats_ante = self._get_current_stats("room", r1)
-
- self.helper.send_state(
- r1, "cat.hissing", {"value": False}, tok=u1token, state_key="tabby"
- )
-
- r1stats_post = self._get_current_stats("room", r1)
-
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
- self.assertEqual(
- r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
- 0,
- )
-
def test_join_first_time(self):
"""
- When a user joins a room for the first time, total_events, current_state_events and
+ When a user joins a room for the first time, current_state_events and
joined_members should increase by exactly 1.
"""
@@ -455,7 +278,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
1,
@@ -466,7 +288,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def test_join_after_leave(self):
"""
- When a user joins a room after being previously left, total_events and
+        When a user joins a room after having previously left,
joined_members should increase by exactly 1.
current_state_events should not increase.
left_members should decrease by exactly 1.
@@ -490,7 +312,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
0,
@@ -504,7 +325,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def test_invited(self):
"""
- When a user invites another user, current_state_events, total_events and
+ When a user invites another user, current_state_events and
invited_members should increase by exactly 1.
"""
@@ -522,7 +343,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
1,
@@ -533,7 +353,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def test_join_after_invite(self):
"""
- When a user joins a room after being invited, total_events and
+        When a user joins a room after being invited,
joined_members should increase by exactly 1.
current_state_events should not increase.
invited_members should decrease by exactly 1.
@@ -556,7 +376,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
0,
@@ -570,7 +389,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def test_left(self):
"""
- When a user leaves a room after joining, total_events and
+        When a user leaves a room after joining,
left_members should increase by exactly 1.
current_state_events should not increase.
joined_members should decrease by exactly 1.
@@ -593,7 +412,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
0,
@@ -607,7 +425,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
def test_banned(self):
"""
- When a user is banned from a room after joining, total_events and
+        When a user is banned from a room after joining,
left_members should increase by exactly 1.
current_state_events should not increase.
banned_members should decrease by exactly 1.
@@ -630,7 +448,6 @@ class StatsRoomTests(unittest.HomeserverTestCase):
r1stats_post = self._get_current_stats("room", r1)
- self.assertEqual(r1stats_post["total_events"] - r1stats_ante["total_events"], 1)
self.assertEqual(
r1stats_post["current_state_events"] - r1stats_ante["current_state_events"],
0,
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index f58afbc244..fa3cff598e 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -38,6 +38,9 @@ U_ONION = UserID.from_string("@onion:farm")
# Test room id
ROOM_ID = "a-room"
+# Room we're not in
+OTHER_ROOM_ID = "another-room"
+
def _expect_edu_transaction(edu_type, content, origin="test"):
return {
@@ -115,6 +118,11 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
hs.get_auth().check_user_in_room = check_user_in_room
+ async def check_host_in_room(room_id, server_name):
+ return room_id == ROOM_ID
+
+ hs.get_event_auth_handler().check_host_in_room = check_host_in_room
+
def get_joined_hosts_for_room(room_id):
return {member.domain for member in self.room_members}
@@ -244,6 +252,35 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase):
],
)
+ def test_started_typing_remote_recv_not_in_room(self):
+ self.room_members = [U_APPLE, U_ONION]
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+
+ channel = self.make_request(
+ "PUT",
+ "/_matrix/federation/v1/send/1000000",
+ _make_edu_transaction_json(
+ "m.typing",
+ content={
+ "room_id": OTHER_ROOM_ID,
+ "user_id": U_ONION.to_string(),
+ "typing": True,
+ },
+ ),
+ federation_auth_origin=b"farm",
+ )
+ self.assertEqual(channel.code, 200)
+
+ self.on_new_event.assert_not_called()
+
+ self.assertEquals(self.event_source.get_current_key(), 0)
+ events = self.get_success(
+ self.event_source.get_new_events(room_ids=[OTHER_ROOM_ID], from_key=0)
+ )
+ self.assertEquals(events[0], [])
+ self.assertEquals(events[1], 0)
+
@override_config({"send_federation": True})
def test_stopped_typing(self):
self.room_members = [U_APPLE, U_BANANA, U_ONION]
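The new test above checks that a typing EDU for a room this server has no knowledge of is dropped without waking any listeners. In outline (a simplification; the real check lives in the typing handler and uses the `check_host_in_room` stubbed earlier in this file):

```python
# Simplified outline of the guard under test, not the exact handler code:
async def on_typing_edu(handler, content: dict) -> None:
    room_id = content["room_id"]
    if not await handler.event_auth_handler.check_host_in_room(
        room_id, handler.server_name
    ):
        # We are not in this room: ignore the EDU entirely, so the event
        # stream key never advances (which is what the assertions verify).
        return
    ...  # normal typing-notification processing
```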
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index e45980316b..a37bce08c3 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -273,7 +273,7 @@ class MatrixFederationAgentTests(unittest.TestCase):
self.assertEqual(response.code, 200)
# Send the body
- request.write('{ "a": 1 }'.encode("ascii"))
+ request.write(b'{ "a": 1 }')
request.finish()
self.reactor.pump((0.1,))
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index ed9a884d76..d9a8b077d3 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -102,7 +102,7 @@ class FederationClientTests(HomeserverTestCase):
self.assertNoResult(test_d)
# Send it the HTTP response
- res_json = '{ "a": 1 }'.encode("ascii")
+ res_json = b'{ "a": 1 }'
protocol.dataReceived(
b"HTTP/1.1 200 OK\r\n"
b"Server: Fake\r\n"
@@ -339,10 +339,8 @@ class FederationClientTests(HomeserverTestCase):
# Send it the HTTP response
client.dataReceived(
- (
- b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
- b"Server: Fake\r\n\r\n"
- )
+ b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
+ b"Server: Fake\r\n\r\n"
)
# Push by enough to time it out
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 2c68b9a13c..81d9e2f484 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -100,9 +100,9 @@ class ModuleApiTestCase(HomeserverTestCase):
"content": content,
"sender": user_id,
}
- event = self.get_success(
+ event: EventBase = self.get_success(
self.module_api.create_and_send_event_into_room(event_dict)
- ) # type: EventBase
+ )
self.assertEqual(event.sender, user_id)
self.assertEqual(event.type, "m.room.message")
self.assertEqual(event.room_id, room_id)
@@ -136,9 +136,9 @@ class ModuleApiTestCase(HomeserverTestCase):
"sender": user_id,
"state_key": "",
}
- event = self.get_success(
+ event: EventBase = self.get_success(
self.module_api.create_and_send_event_into_room(event_dict)
- ) # type: EventBase
+ )
self.assertEqual(event.sender, user_id)
self.assertEqual(event.type, "m.room.power_levels")
self.assertEqual(event.room_id, room_id)
@@ -281,7 +281,7 @@ class ModuleApiTestCase(HomeserverTestCase):
)
for call in calls:
call_args = call[0]
- federation_transaction = call_args[0] # type: Transaction
+ federation_transaction: Transaction = call_args[0]
# Get the sent EDUs in this transaction
edus = federation_transaction.get_dict()["edus"]
@@ -390,7 +390,7 @@ def _test_sending_local_online_presence_to_local_user(
)
test_case.assertEqual(len(presence_updates), 1)
- presence_update = presence_updates[0] # type: UserPresenceState
+ presence_update: UserPresenceState = presence_updates[0]
test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id)
test_case.assertEqual(presence_update.state, "online")
@@ -443,7 +443,7 @@ def _test_sending_local_online_presence_to_local_user(
)
test_case.assertEqual(len(presence_updates), 1)
- presence_update = presence_updates[0] # type: UserPresenceState
+ presence_update: UserPresenceState = presence_updates[0]
test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id)
test_case.assertEqual(presence_update.state, "online")
@@ -454,7 +454,7 @@ def _test_sending_local_online_presence_to_local_user(
)
test_case.assertEqual(len(presence_updates), 1)
- presence_update = presence_updates[0] # type: UserPresenceState
+ presence_update: UserPresenceState = presence_updates[0]
test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id)
test_case.assertEqual(presence_update.state, "online")
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 624bd1b927..e9fd991718 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -53,9 +53,9 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
# build a replication server
server_factory = ReplicationStreamProtocolFactory(hs)
self.streamer = hs.get_replication_streamer()
- self.server = server_factory.buildProtocol(
+ self.server: ServerReplicationStreamProtocol = server_factory.buildProtocol(
None
- ) # type: ServerReplicationStreamProtocol
+ )
# Make a new HomeServer object for the worker
self.reactor.lookups["testserv"] = "1.2.3.4"
@@ -195,7 +195,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase):
fetching updates for given stream.
"""
- path = request.path # type: bytes # type: ignore
+ path: bytes = request.path # type: ignore
self.assertRegex(
path,
br"^/_synapse/replication/get_repl_stream_updates/%s/[^/]+$"
@@ -212,7 +212,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
unlike `BaseStreamTestCase`.
"""
- servlets = [] # type: List[Callable[[HomeServer, JsonResource], None]]
+ servlets: List[Callable[[HomeServer, JsonResource], None]] = []
def setUp(self):
super().setUp()
@@ -448,7 +448,7 @@ class TestReplicationDataHandler(ReplicationDataHandler):
super().__init__(hs)
# list of received (stream_name, token, row) tuples
- self.received_rdata_rows = [] # type: List[Tuple[str, int, Any]]
+ self.received_rdata_rows: List[Tuple[str, int, Any]] = []
async def on_rdata(self, stream_name, instance_name, token, rows):
await super().on_rdata(stream_name, instance_name, token, rows)
@@ -484,7 +484,7 @@ class FakeRedisPubSubServer:
class FakeRedisPubSubProtocol(Protocol):
"""A connection from a client talking to the fake Redis server."""
- transport = None # type: Optional[FakeTransport]
+ transport: Optional[FakeTransport] = None
def __init__(self, server: FakeRedisPubSubServer):
self._server = server
@@ -550,12 +550,12 @@ class FakeRedisPubSubProtocol(Protocol):
if obj is None:
return "$-1\r\n"
if isinstance(obj, str):
- return "${len}\r\n{str}\r\n".format(len=len(obj), str=obj)
+ return f"${len(obj)}\r\n{obj}\r\n"
if isinstance(obj, int):
- return ":{val}\r\n".format(val=obj)
+ return f":{obj}\r\n"
if isinstance(obj, (list, tuple)):
items = "".join(self.encode(a) for a in obj)
- return "*{len}\r\n{items}".format(len=len(obj), items=items)
+ return f"*{len(obj)}\r\n{items}"
raise Exception("Unrecognized type for encoding redis: %r: %r", type(obj), obj)
diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
index f51fa0a79e..666008425a 100644
--- a/tests/replication/tcp/streams/test_events.py
+++ b/tests/replication/tcp/streams/test_events.py
@@ -135,9 +135,9 @@ class EventsStreamTestCase(BaseStreamTestCase):
)
# this is the point in the DAG where we make a fork
- fork_point = self.get_success(
+ fork_point: List[str] = self.get_success(
self.hs.get_datastore().get_latest_event_ids_in_room(self.room_id)
- ) # type: List[str]
+ )
events = [
self._inject_state_event(sender=OTHER_USER)
@@ -238,7 +238,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
self.assertEqual(row.data.event_id, pl_event.event_id)
# the state rows are unsorted
- state_rows = [] # type: List[EventsStreamCurrentStateRow]
+ state_rows: List[EventsStreamCurrentStateRow] = []
for stream_name, _, row in received_rows:
self.assertEqual("events", stream_name)
self.assertIsInstance(row, EventsStreamRow)
@@ -290,11 +290,11 @@ class EventsStreamTestCase(BaseStreamTestCase):
)
# this is the point in the DAG where we make a fork
- fork_point = self.get_success(
+ fork_point: List[str] = self.get_success(
self.hs.get_datastore().get_latest_event_ids_in_room(self.room_id)
- ) # type: List[str]
+ )
- events = [] # type: List[EventBase]
+ events: List[EventBase] = []
for user in user_ids:
events.extend(
self._inject_state_event(sender=user) for _ in range(STATES_PER_USER)
@@ -355,7 +355,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
self.assertEqual(row.data.event_id, pl_events[i].event_id)
# the state rows are unsorted
- state_rows = [] # type: List[EventsStreamCurrentStateRow]
+ state_rows: List[EventsStreamCurrentStateRow] = []
for _ in range(STATES_PER_USER + 1):
stream_name, token, row = received_rows.pop(0)
self.assertEqual("events", stream_name)
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index 7f5d932f0b..38e292c1ab 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -43,7 +43,7 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
self.assertEqual(stream_name, "receipts")
self.assertEqual(1, len(rdata_rows))
- row = rdata_rows[0] # type: ReceiptsStream.ReceiptsStreamRow
+ row: ReceiptsStream.ReceiptsStreamRow = rdata_rows[0]
self.assertEqual("!room:blue", row.room_id)
self.assertEqual("m.read", row.receipt_type)
self.assertEqual(USER_ID, row.user_id)
@@ -75,7 +75,7 @@ class ReceiptsStreamTestCase(BaseStreamTestCase):
self.assertEqual(token, 3)
self.assertEqual(1, len(rdata_rows))
- row = rdata_rows[0] # type: ReceiptsStream.ReceiptsStreamRow
+ row: ReceiptsStream.ReceiptsStreamRow = rdata_rows[0]
self.assertEqual("!room2:blue", row.room_id)
self.assertEqual("m.read", row.receipt_type)
self.assertEqual(USER_ID, row.user_id)
diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py
index ecd360c2d0..3ff5afc6e5 100644
--- a/tests/replication/tcp/streams/test_typing.py
+++ b/tests/replication/tcp/streams/test_typing.py
@@ -47,7 +47,7 @@ class TypingStreamTestCase(BaseStreamTestCase):
stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
self.assertEqual(stream_name, "typing")
self.assertEqual(1, len(rdata_rows))
- row = rdata_rows[0] # type: TypingStream.TypingStreamRow
+ row: TypingStream.TypingStreamRow = rdata_rows[0]
self.assertEqual(ROOM_ID, row.room_id)
self.assertEqual([USER_ID], row.user_ids)
@@ -102,7 +102,7 @@ class TypingStreamTestCase(BaseStreamTestCase):
stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0]
self.assertEqual(stream_name, "typing")
self.assertEqual(1, len(rdata_rows))
- row = rdata_rows[0] # type: TypingStream.TypingStreamRow
+ row: TypingStream.TypingStreamRow = rdata_rows[0]
self.assertEqual(ROOM_ID, row.room_id)
self.assertEqual([USER_ID], row.user_ids)
diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py
index 76e6644353..ffa425328f 100644
--- a/tests/replication/test_multi_media_repo.py
+++ b/tests/replication/test_multi_media_repo.py
@@ -31,7 +31,7 @@ from tests.server import FakeChannel, FakeSite, FakeTransport, make_request
logger = logging.getLogger(__name__)
-test_server_connection_factory = None # type: Optional[TestServerTLSConnectionFactory]
+test_server_connection_factory: Optional[TestServerTLSConnectionFactory] = None
class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
@@ -70,7 +70,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
self.reactor,
FakeSite(resource),
"GET",
- "/{}/{}".format(target, media_id),
+ f"/{target}/{media_id}",
shorthand=False,
access_token=self.access_token,
await_result=False,
@@ -113,7 +113,7 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
self.assertEqual(request.method, b"GET")
self.assertEqual(
request.path,
- "/_matrix/media/r0/download/{}/{}".format(target, media_id).encode("utf-8"),
+ f"/_matrix/media/r0/download/{target}/{media_id}".encode("utf-8"),
)
self.assertEqual(
request.requestHeaders.getRawHeaders(b"host"), [target.encode("utf-8")]
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 5eca5c165d..f3615af97e 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -211,7 +211,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
self.reactor,
sync_hs_site,
"GET",
- "/sync?since={}".format(next_batch),
+ f"/sync?since={next_batch}",
access_token=access_token,
)
@@ -241,7 +241,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
self.reactor,
sync_hs_site,
"GET",
- "/sync?since={}".format(vector_clock_token),
+ f"/sync?since={vector_clock_token}",
access_token=access_token,
)
@@ -266,7 +266,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
self.reactor,
sync_hs_site,
"GET",
- "/sync?since={}".format(next_batch),
+ f"/sync?since={next_batch}",
access_token=access_token,
)
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 2f7090e554..a7c6e595b9 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -66,7 +66,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
# Create a new group
channel = self.make_request(
"POST",
- "/create_group".encode("ascii"),
+ b"/create_group",
access_token=self.admin_user_tok,
content={"localpart": "test"},
)
@@ -129,9 +129,7 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase):
def _get_groups_user_is_in(self, access_token):
"""Returns the list of groups the user is in (given their access token)"""
- channel = self.make_request(
- "GET", "/joined_groups".encode("ascii"), access_token=access_token
- )
+ channel = self.make_request("GET", b"/joined_groups", access_token=access_token)
self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index ee071c2477..17ec8bfd3b 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -535,7 +535,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):
)
)
- self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
+ self.assertEqual(count, 0, msg=f"Rows not purged in {table}")
def _assert_peek(self, room_id, expect_code):
"""Assert that the admin user can (or cannot) peek into the room."""
@@ -599,7 +599,7 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase):
)
)
- self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
+ self.assertEqual(count, 0, msg=f"Rows not purged in {table}")
class RoomTestCase(unittest.HomeserverTestCase):
@@ -1280,7 +1280,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
self.public_room_id = self.helper.create_room_as(
self.creator, tok=self.creator_tok, is_public=True
)
- self.url = "/_synapse/admin/v1/join/{}".format(self.public_room_id)
+ self.url = f"/_synapse/admin/v1/join/{self.public_room_id}"
def test_requester_is_no_admin(self):
"""
@@ -1420,7 +1420,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
private_room_id = self.helper.create_room_as(
self.creator, tok=self.creator_tok, is_public=False
)
- url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+ url = f"/_synapse/admin/v1/join/{private_room_id}"
body = json.dumps({"user_id": self.second_user_id})
channel = self.make_request(
@@ -1463,7 +1463,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
# Join user to room.
- url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+ url = f"/_synapse/admin/v1/join/{private_room_id}"
body = json.dumps({"user_id": self.second_user_id})
channel = self.make_request(
@@ -1493,7 +1493,7 @@ class JoinAliasRoomTestCase(unittest.HomeserverTestCase):
private_room_id = self.helper.create_room_as(
self.admin_user, tok=self.admin_user_tok, is_public=False
)
- url = "/_synapse/admin/v1/join/{}".format(private_room_id)
+ url = f"/_synapse/admin/v1/join/{private_room_id}"
body = json.dumps({"user_id": self.second_user_id})
channel = self.make_request(
@@ -1633,7 +1633,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- "/_synapse/admin/v1/rooms/{}/make_room_admin".format(room_id),
+ f"/_synapse/admin/v1/rooms/{room_id}/make_room_admin",
content={},
access_token=self.admin_user_tok,
)
@@ -1660,7 +1660,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- "/_synapse/admin/v1/rooms/{}/make_room_admin".format(room_id),
+ f"/_synapse/admin/v1/rooms/{room_id}/make_room_admin",
content={},
access_token=self.admin_user_tok,
)
@@ -1686,7 +1686,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- "/_synapse/admin/v1/rooms/{}/make_room_admin".format(room_id),
+ f"/_synapse/admin/v1/rooms/{room_id}/make_room_admin",
content={"user_id": self.second_user_id},
access_token=self.admin_user_tok,
)
@@ -1720,7 +1720,7 @@ class MakeRoomAdminTestCase(unittest.HomeserverTestCase):
channel = self.make_request(
"POST",
- "/_synapse/admin/v1/rooms/{}/make_room_admin".format(room_id),
+ f"/_synapse/admin/v1/rooms/{room_id}/make_room_admin",
content={},
access_token=self.admin_user_tok,
)
@@ -1753,7 +1753,6 @@ PURGE_TABLES = [
"room_memberships",
"room_stats_state",
"room_stats_current",
- "room_stats_historical",
"room_stats_earliest_token",
"rooms",
"stream_ordering_to_exterm",
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index e1fe72fc5d..c5e1c5458b 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -233,11 +233,11 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
"content": content,
"sender": self.user_id,
}
- event = self.get_success(
+ event: EventBase = self.get_success(
current_rules_module().module_api.create_and_send_event_into_room(
event_dict
)
- ) # type: EventBase
+ )
self.assertEquals(event.sender, self.user_id)
self.assertEquals(event.room_id, self.room_id)
diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py
index 605b952316..7eba69642a 100644
--- a/tests/rest/client/v1/test_login.py
+++ b/tests/rest/client/v1/test_login.py
@@ -453,7 +453,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
self.assertEqual(channel.code, 200, channel.result)
# stick the flows results in a dict by type
- flow_results = {} # type: Dict[str, Any]
+ flow_results: Dict[str, Any] = {}
for f in channel.json_body["flows"]:
flow_type = f["type"]
self.assertNotIn(
@@ -501,7 +501,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
p.close()
# there should be a link for each href
- returned_idps = [] # type: List[str]
+ returned_idps: List[str] = []
for link in p.links:
path, query = link.split("?", 1)
self.assertEqual(path, "pick_idp")
@@ -582,7 +582,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase):
# ... and should have set a cookie including the redirect url
cookie_headers = channel.headers.getRawHeaders("Set-Cookie")
assert cookie_headers
- cookies = {} # type: Dict[str, str]
+ cookies: Dict[str, str] = {}
for h in cookie_headers:
key, value = h.split(";")[0].split("=", maxsplit=1)
cookies[key] = value
@@ -874,9 +874,7 @@ class JWTTestCase(unittest.HomeserverTestCase):
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
- result = jwt.encode(
- payload, secret, self.jwt_algorithm
- ) # type: Union[str, bytes]
+ result: Union[str, bytes] = jwt.encode(payload, secret, self.jwt_algorithm)
if isinstance(result, bytes):
return result.decode("ascii")
return result
@@ -1084,7 +1082,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase):
def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str:
# PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str.
- result = jwt.encode(payload, secret, "RS256") # type: Union[bytes,str]
+ result: Union[bytes, str] = jwt.encode(payload, secret, "RS256")
if isinstance(result, bytes):
return result.decode("ascii")
return result
@@ -1272,7 +1270,7 @@ class UsernamePickerTestCase(HomeserverTestCase):
self.assertEqual(picker_url, "/_synapse/client/pick_username/account_details")
# ... with a username_mapping_session cookie
- cookies = {} # type: Dict[str,str]
+ cookies: Dict[str, str] = {}
channel.extract_cookies(cookies)
self.assertIn("username_mapping_session", cookies)
session_id = cookies["username_mapping_session"]
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index e94566ffd7..3df070c936 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1206,7 +1206,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/join".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/join",
content={"reason": reason},
access_token=self.second_tok,
)
@@ -1220,7 +1220,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/leave".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/leave",
content={"reason": reason},
access_token=self.second_tok,
)
@@ -1234,7 +1234,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/kick".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/kick",
content={"reason": reason, "user_id": self.second_user_id},
access_token=self.second_tok,
)
@@ -1248,7 +1248,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/ban".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/ban",
content={"reason": reason, "user_id": self.second_user_id},
access_token=self.creator_tok,
)
@@ -1260,7 +1260,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/unban".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/unban",
content={"reason": reason, "user_id": self.second_user_id},
access_token=self.creator_tok,
)
@@ -1272,7 +1272,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/invite".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/invite",
content={"reason": reason, "user_id": self.second_user_id},
access_token=self.creator_tok,
)
@@ -1291,7 +1291,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase):
reason = "hello"
channel = self.make_request(
"POST",
- "/_matrix/client/r0/rooms/{}/leave".format(self.room_id),
+ f"/_matrix/client/r0/rooms/{self.room_id}/leave",
content={"reason": reason},
access_token=self.second_tok,
)
diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py
index 856aa8682f..2e2f94742e 100644
--- a/tests/rest/client/v2_alpha/test_relations.py
+++ b/tests/rest/client/v2_alpha/test_relations.py
@@ -273,7 +273,7 @@ class RelationsTestCase(unittest.HomeserverTestCase):
prev_token = None
found_event_ids = []
-        encoded_key = urllib.parse.quote_plus("👍".encode("utf-8"))
+        encoded_key = urllib.parse.quote_plus("👍".encode())
for _ in range(20):
from_token = ""
if prev_token:
diff --git a/tests/rest/client/v2_alpha/test_report_event.py b/tests/rest/client/v2_alpha/test_report_event.py
index 1ec6b05e5b..a76a6fef1e 100644
--- a/tests/rest/client/v2_alpha/test_report_event.py
+++ b/tests/rest/client/v2_alpha/test_report_event.py
@@ -41,7 +41,7 @@ class ReportEventTestCase(unittest.HomeserverTestCase):
self.helper.join(self.room_id, user=self.admin_user, tok=self.admin_user_tok)
resp = self.helper.send(self.room_id, tok=self.admin_user_tok)
self.event_id = resp["event_id"]
- self.report_path = "rooms/{}/report/{}".format(self.room_id, self.event_id)
+ self.report_path = f"rooms/{self.room_id}/report/{self.event_id}"
def test_reason_str_and_score_int(self):
data = {"reason": "this makes me sad", "score": -100}
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 95e7075841..2d6b49692e 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -310,7 +310,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
correctly decode it as the UTF-8 string, and use filename* in the
response.
"""
- filename = parse.quote("\u2603".encode("utf8")).encode("ascii")
+ filename = parse.quote("\u2603".encode()).encode("ascii")
channel = self._req(
b"inline; filename*=utf-8''" + filename + self.test_image.extension
)
diff --git a/tests/server.py b/tests/server.py
index f32d8dc375..6fddd3b305 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -52,7 +52,7 @@ class FakeChannel:
_reactor = attr.ib()
result = attr.ib(type=dict, default=attr.Factory(dict))
_ip = attr.ib(type=str, default="127.0.0.1")
- _producer = None # type: Optional[Union[IPullProducer, IPushProducer]]
+ _producer: Optional[Union[IPullProducer, IPushProducer]] = None
@property
def json_body(self):
@@ -316,8 +316,10 @@ class ThreadedMemoryReactorClock(MemoryReactorClock):
self._tcp_callbacks = {}
self._udp = []
- lookups = self.lookups = {} # type: Dict[str, str]
- self._thread_callbacks = deque() # type: Deque[Callable[[], None]]
+ self.lookups: Dict[str, str] = {}
+ self._thread_callbacks: Deque[Callable[[], None]] = deque()
+
+ lookups = self.lookups
@implementer(IResolverSimple)
class FakeResolver:
diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py
index 069db0edc4..0da42b5ac5 100644
--- a/tests/storage/test_background_update.py
+++ b/tests/storage/test_background_update.py
@@ -7,9 +7,7 @@ from tests import unittest
class BackgroundUpdateTestCase(unittest.HomeserverTestCase):
def prepare(self, reactor, clock, homeserver):
- self.updates = (
- self.hs.get_datastore().db_pool.updates
- ) # type: BackgroundUpdater
+ self.updates: BackgroundUpdater = self.hs.get_datastore().db_pool.updates
# the base test class should have run the real bg updates for us
self.assertTrue(
self.get_success(self.updates.has_completed_background_updates())
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
index 41bef62ca8..43628ce44f 100644
--- a/tests/storage/test_directory.py
+++ b/tests/storage/test_directory.py
@@ -59,5 +59,5 @@ class DirectoryStoreTestCase(HomeserverTestCase):
self.assertEqual(self.room.to_string(), room_id)
self.assertIsNone(
- (self.get_success(self.store.get_association_from_room_alias(self.alias)))
+ self.get_success(self.store.get_association_from_room_alias(self.alias))
)
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 792b1c44c1..7486078284 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -27,7 +27,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
- self.db_pool = self.store.db_pool # type: DatabasePool
+ self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
@@ -460,7 +460,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
- self.db_pool = self.store.db_pool # type: DatabasePool
+ self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
@@ -586,7 +586,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
- self.db_pool = self.store.db_pool # type: DatabasePool
+ self.db_pool: DatabasePool = self.store.db_pool
self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index 8a446da848..a1ba99ff14 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -45,11 +45,7 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
)
self.assertIsNone(
- (
- self.get_success(
- self.store.get_profile_displayname(self.u_frank.localpart)
- )
- )
+ self.get_success(self.store.get_profile_displayname(self.u_frank.localpart))
)
def test_avatar_url(self):
@@ -76,9 +72,5 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
)
self.assertIsNone(
- (
- self.get_success(
- self.store.get_profile_avatar_url(self.u_frank.localpart)
- )
- )
+ self.get_success(self.store.get_profile_avatar_url(self.u_frank.localpart))
)
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index 54c5b470c7..e5574063f1 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -75,7 +75,7 @@ class PurgeTests(HomeserverTestCase):
token = self.get_success(
self.store.get_topological_token_for_event(last["event_id"])
)
- event = "t{}-{}".format(token.topological + 1, token.stream + 1)
+ event = f"t{token.topological + 1}-{token.stream + 1}"
# Purge everything before this topological token
f = self.get_failure(
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index 70257bf210..31ce7f6252 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -49,7 +49,7 @@ class RoomStoreTestCase(HomeserverTestCase):
)
def test_get_room_unknown_room(self):
- self.assertIsNone((self.get_success(self.store.get_room("!uknown:test"))))
+ self.assertIsNone(self.get_success(self.store.get_room("!uknown:test")))
def test_get_room_with_stats(self):
self.assertDictContainsSubset(
diff --git a/tests/test_state.py b/tests/test_state.py
index 62f7095873..780eba823c 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -199,7 +199,7 @@ class StateTestCase(unittest.TestCase):
self.store.register_events(graph.walk())
- context_store = {} # type: dict[str, EventContext]
+ context_store: dict[str, EventContext] = {}
for event in graph.walk():
context = yield defer.ensureDeferred(
diff --git a/tests/test_types.py b/tests/test_types.py
index d7881021d3..0d0c00d97a 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -103,6 +103,4 @@ class MapUsernameTestCase(unittest.TestCase):
def testNonAscii(self):
# this should work with either a unicode or a bytes
        self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast")
-        self.assertEqual(
-            map_username_to_mxid_localpart("têst".encode("utf-8")), "t=c3=aast"
-        )
+        self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast")
diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py
index 1fbb38f4be..e878af5f12 100644
--- a/tests/test_utils/html_parsers.py
+++ b/tests/test_utils/html_parsers.py
@@ -23,13 +23,13 @@ class TestHtmlParser(HTMLParser):
super().__init__()
# a list of links found in the doc
- self.links = [] # type: List[str]
+ self.links: List[str] = []
# the values of any hidden <input>s: map from name to value
- self.hiddens = {} # type: Dict[str, Optional[str]]
+ self.hiddens: Dict[str, Optional[str]] = {}
# the values of any radio buttons: map from name to list of values
- self.radios = {} # type: Dict[str, List[Optional[str]]]
+ self.radios: Dict[str, List[Optional[str]]] = {}
def handle_starttag(
self, tag: str, attrs: Iterable[Tuple[str, Optional[str]]]
diff --git a/tests/unittest.py b/tests/unittest.py
index 74db7c08f1..c6d9064423 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -140,7 +140,7 @@ class TestCase(unittest.TestCase):
try:
self.assertEquals(attrs[key], getattr(obj, key))
except AssertionError as e:
- raise (type(e))("Assert error for '.{}':".format(key)) from e
+ raise (type(e))(f"Assert error for '.{key}':") from e
def assert_dict(self, required, actual):
"""Does a partial assert of a dict.
@@ -520,7 +520,7 @@ class HomeserverTestCase(TestCase):
if not isinstance(deferred, Deferred):
return d
- results = [] # type: list
+ results: list = []
deferred.addBoth(results.append)
self.pump(by=by)
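The re-raise in the first `tests/unittest.py` hunk uses explicit exception chaining: `raise ... from e` keeps the original assertion as `__cause__`, so both tracebacks are reported. A self-contained sketch of the same pattern (the dict and key are made up for illustration):

try:
    assert {"a": 1}["a"] == 2, "values differ"
except AssertionError as e:
    # Wrap the failure with extra context; `from e` preserves the
    # original exception as __cause__, so running this prints both
    # the wrapping error and the original one in a chained traceback.
    raise type(e)("Assert error for '.a':") from e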
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 0277998cbe..39947a166b 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -174,7 +174,7 @@ class DescriptorTestCase(unittest.TestCase):
return self.result
obj = Cls()
- callbacks = set() # type: Set[str]
+ callbacks: Set[str] = set()
# set off an asynchronous request
obj.result = origin_d = defer.Deferred()
diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py
index e712eb42ea..3c0ddd4f18 100644
--- a/tests/util/test_itertools.py
+++ b/tests/util/test_itertools.py
@@ -44,7 +44,7 @@ class ChunkSeqTests(TestCase):
)
def test_empty_input(self):
- parts = chunk_seq([], 5) # type: Iterable[Sequence]
+ parts: Iterable[Sequence] = chunk_seq([], 5)
self.assertEqual(
list(parts),
@@ -56,13 +56,13 @@ class SortTopologically(TestCase):
def test_empty(self):
"Test that an empty graph works correctly"
- graph = {} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {}
self.assertEqual(list(sorted_topologically([], graph)), [])
def test_handle_empty_graph(self):
"Test that a graph where a node doesn't have an entry is treated as empty"
- graph = {} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {}
# For disconnected nodes the output is simply sorted.
self.assertEqual(list(sorted_topologically([1, 2], graph)), [1, 2])
@@ -70,7 +70,7 @@ class SortTopologically(TestCase):
def test_disconnected(self):
"Test that a graph with no edges work"
- graph = {1: [], 2: []} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {1: [], 2: []}
# For disconnected nodes the output is simply sorted.
self.assertEqual(list(sorted_topologically([1, 2], graph)), [1, 2])
@@ -78,19 +78,19 @@ class SortTopologically(TestCase):
def test_linear(self):
"Test that a simple `4 -> 3 -> 2 -> 1` graph works"
- graph = {1: [], 2: [1], 3: [2], 4: [3]} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]}
self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
def test_subset(self):
"Test that only sorting a subset of the graph works"
- graph = {1: [], 2: [1], 3: [2], 4: [3]} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3]}
self.assertEqual(list(sorted_topologically([4, 3], graph)), [3, 4])
def test_fork(self):
"Test that a forked graph works"
- graph = {1: [], 2: [1], 3: [1], 4: [2, 3]} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [1], 4: [2, 3]}
# Valid orderings are `[1, 3, 2, 4]` or `[1, 2, 3, 4]`, but we should
# always get the same one.
@@ -98,12 +98,12 @@ class SortTopologically(TestCase):
def test_duplicates(self):
"Test that a graph with duplicate edges work"
- graph = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {1: [], 2: [1, 1], 3: [2, 2], 4: [3]}
self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
def test_multiple_paths(self):
"Test that a graph with multiple paths between two nodes work"
- graph = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]} # type: Dict[int, List[int]]
+ graph: Dict[int, List[int]] = {1: [], 2: [1], 3: [2], 4: [3, 2, 1]}
self.assertEqual(list(sorted_topologically([4, 3, 2, 1], graph)), [1, 2, 3, 4])
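The tests above rely on `sorted_topologically` returning one stable ordering whenever several are valid. That property falls out of Kahn's algorithm if ready nodes are drawn from a heap; the sketch below is one way to do it under that assumption, not necessarily Synapse's actual implementation:

import heapq
from typing import Dict, Iterable, List

def sorted_topologically_sketch(
    nodes: Iterable[int], graph: Dict[int, List[int]]
) -> List[int]:
    """Kahn's algorithm, breaking ties by popping the smallest node.

    Illustrative only; de-duplicates edges so cases like
    `{2: [1, 1]}` from the duplicate-edge test are handled.
    """
    node_set = set(nodes)
    # Only dependencies inside the requested subset count, which is
    # what the subset test above exercises.
    deps = {n: {d for d in graph.get(n, ()) if d in node_set} for n in node_set}
    degree = {n: len(ds) for n, ds in deps.items()}

    heap = [n for n, d in degree.items() if d == 0]
    heapq.heapify(heap)

    result = []
    while heap:
        node = heapq.heappop(heap)
        result.append(node)
        # Releasing `node` may unblock nodes that depended on it.
        # (Quadratic scan; fine for a sketch, a real implementation
        # would keep a reverse-edge map.)
        for other in node_set:
            if node in deps[other]:
                degree[other] -= 1
                if degree[other] == 0:
                    heapq.heappush(heap, other)
    return result

# The forked graph from test_fork: always [1, 2, 3, 4], never [1, 3, 2, 4].
assert sorted_topologically_sketch([4, 3, 2, 1], {1: [], 2: [1], 3: [1], 4: [2, 3]}) == [1, 2, 3, 4]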